From 8dce0c624fc0aeb1cd210461c95156ff3405bfa2 Mon Sep 17 00:00:00 2001 From: Nathaniel Caza Date: Tue, 28 May 2019 14:25:38 -0500 Subject: [PATCH] initial commit Co-authored-by: Adam Westman Co-authored-by: Arundhati Rao Co-authored-by: Mitch Cimenski Co-authored-by: Nathaniel Cook --- .gitignore | 57 + CODE_OF_CONDUCT.md | 74 + CONTRIBUTING.md | 39 + CONTRIBUTORS | 21 + LICENSE.md | 13 + Makefile | 170 + README.md | 52 + alert/alert.go | 78 + alert/alert_test.go | 37 + alert/dedup.go | 85 + alert/legacysearch.go | 203 + alert/log.go | 40 + alert/log/entry.go | 27 + alert/log/legacylogs.go | 108 + alert/log/meta.go | 14 + alert/log/rawentry.go | 170 + alert/log/rawjson.go | 30 + alert/log/store.go | 739 + alert/log/subject.go | 86 + alert/log/type.go | 42 + alert/logentryfetcher.go | 49 + alert/search.go | 177 + alert/source.go | 37 + alert/state.go | 13 + alert/status.go | 38 + alert/store.go | 817 + alert/summary.go | 11 + app/app.go | 149 + app/appconfig.go | 52 + app/clusterexporter.go | 38 + app/cmd.go | 618 + app/contextlocker.go | 128 + app/cooldown.go | 64 + app/getsetconfig.go | 73 + app/healthcheck.go | 44 + app/initauth.go | 53 + app/initengine.go | 57 + app/initgraphql.go | 68 + app/inithttp.go | 220 + app/inithttputil.go | 67 + app/inithttputil_test.go | 146 + app/initslack.go | 20 + app/initstores.go | 227 + app/inittwilio.go | 34 + app/lifecycle/manager.go | 402 + app/lifecycle/manager_test.go | 196 + app/lifecycle/pauseable.go | 15 + app/lifecycle/pauseresumer.go | 123 + app/lifecycle/pauseresumer_test.go | 120 + app/listenevents.go | 78 + app/listenstatus.go | 51 + app/logexporter.go | 20 + app/middleware.go | 169 + app/middlewaregzip.go | 67 + app/pause.go | 54 + app/recoverexporter.go | 39 + app/runapp.go | 48 + app/shutdown.go | 73 + app/shutdownsignals_unix.go | 10 + app/startup.go | 94 + app/tracing.go | 78 + app/trigger.go | 12 + app/version.go | 9 + assignment/assignment.go | 6 + assignment/doc.go | 7 + assignment/source.go | 74 + 
assignment/srctype.go | 32 + assignment/srctype_string.go | 16 + assignment/target.go | 124 + assignment/targettype.go | 99 + assignment/targettype_string.go | 35 + auth/basic/config.go | 5 + auth/basic/db.go | 89 + auth/basic/doc.go | 2 + auth/basic/identityprovider.go | 58 + auth/basic/provider.go | 21 + auth/cookies.go | 38 + auth/faildelay.go | 37 + auth/gettoken.go | 49 + auth/github/config.go | 13 + auth/github/doc.go | 2 + auth/github/identityprovider.go | 234 + auth/github/provider.go | 57 + auth/handler.go | 595 + auth/identityprovider.go | 62 + auth/nonce/nonce.go | 105 + auth/oidc/config.go | 12 + auth/oidc/identityprovider.go | 287 + auth/routeinfo.go | 13 + cmd/goalert/main.go | 18 + config/config.go | 310 + config/context.go | 19 + config/mergejson.go | 50 + config/mergejson_test.go | 18 + config/source.go | 21 + config/store.go | 304 + dataloader/alertloader.go | 89 + dataloader/cmloader.go | 52 + dataloader/loader.go | 226 + dataloader/loader_test.go | 53 + dataloader/policyloader.go | 49 + dataloader/rotationloader.go | 49 + dataloader/scheduleloader.go | 49 + dataloader/serviceloader.go | 49 + dataloader/userloader.go | 49 + devtools/ci/dockerfiles/all-in-one/Dockerfile | 3 + devtools/ci/dockerfiles/all-in-one/README.md | 10 + devtools/ci/dockerfiles/all-in-one/start.sh | 17 + devtools/ci/dockerfiles/build-env/Dockerfile | 7 + devtools/ci/tasks/build-debug.yml | 24 + devtools/ci/tasks/build-test.yml | 27 + devtools/ci/tasks/scripts/build-test.sh | 20 + devtools/ci/tasks/scripts/codecheck.sh | 21 + devtools/configparams/main.go | 199 + devtools/devtools.go | 4 + devtools/fetchcounts/main.go | 55 + devtools/gqlgen/gqlgen.go | 7 + devtools/inliner/linebreaker.go | 48 + devtools/inliner/main.go | 175 + devtools/mockslack/api.go | 26 + devtools/mockslack/authrevoke.go | 43 + devtools/mockslack/channelscreate.go | 105 + devtools/mockslack/chatpostmessage.go | 115 + devtools/mockslack/cmd/mockslack/main.go | 76 + devtools/mockslack/conversationsinfo.go | 
61 + devtools/mockslack/conversationslist.go | 153 + devtools/mockslack/conversationslist_test.go | 67 + devtools/mockslack/groupscreate.go | 53 + devtools/mockslack/http.go | 44 + devtools/mockslack/login.go | 110 + devtools/mockslack/oauthaccess.go | 57 + devtools/mockslack/oauthauthorize.go | 110 + devtools/mockslack/permissions.go | 123 + devtools/mockslack/server.go | 204 + devtools/mockslack/state.go | 92 + devtools/mockslack/user.go | 55 + devtools/mockslack/util.go | 97 + devtools/mockslack/validate.go | 12 + devtools/mocktwilio/server.go | 213 + devtools/mocktwilio/sms.go | 168 + devtools/mocktwilio/voicecall.go | 261 + devtools/ordermigrations/main.go | 60 + devtools/resetdb/main.go | 533 + devtools/runjson/ci-cypress.json | 61 + devtools/runjson/localdev-cypress-prod.json | 101 + devtools/runjson/localdev-cypress.json | 116 + devtools/runjson/localdev.json | 65 + devtools/runjson/main.go | 42 + devtools/runjson/run.go | 96 + devtools/runjson/task.go | 168 + devtools/simpleproxy/main.go | 39 + devtools/tools.go | 7 + devtools/waitfor/main.go | 69 + engine/backend.go | 49 + engine/callback.go | 35 + engine/cleanupmanager/db.go | 56 + engine/cleanupmanager/update.go | 63 + engine/config.go | 31 + engine/engine.go | 471 + engine/escalationmanager/db.go | 262 + engine/escalationmanager/update.go | 118 + engine/heartbeatmanager/db.go | 74 + engine/heartbeatmanager/process.go | 153 + engine/message/config.go | 82 + engine/message/db.go | 767 + engine/message/message.go | 16 + engine/message/row.go | 20 + engine/message/status.go | 37 + engine/message/type.go | 12 + engine/npcyclemanager/db.go | 110 + engine/npcyclemanager/update.go | 32 + engine/processinglock/config.go | 32 + engine/processinglock/conn.go | 59 + engine/processinglock/error.go | 11 + engine/processinglock/lock.go | 94 + engine/processinglock/type.go | 81 + engine/resolver/resolver.go | 402 + engine/resolver/state.go | 8 + engine/rotationmanager/advance.go | 60 + engine/rotationmanager/db.go | 
61 + engine/rotationmanager/update.go | 127 + engine/schedulemanager/db.go | 91 + engine/schedulemanager/update.go | 183 + engine/sendmessage.go | 119 + engine/statusupdatemanager/db.go | 93 + engine/statusupdatemanager/update.go | 30 + engine/verifymanager/db.go | 63 + engine/verifymanager/update.go | 42 + escalation/policy.go | 25 + escalation/policy_test.go | 36 + escalation/search.go | 121 + escalation/step.go | 41 + escalation/step_test.go | 37 + escalation/store.go | 975 + genericapi/config.go | 16 + genericapi/handler.go | 114 + go.mod | 80 + go.sum | 321 + grafana/grafana.go | 104 + graphql/alert.go | 785 + graphql/alertlog.go | 15 + graphql/assignment.go | 180 + graphql/cache.go | 209 + graphql/config.go | 55 + graphql/contactmethod.go | 246 + graphql/createall.go | 552 + graphql/createallutil.go | 206 + graphql/deleteall.go | 96 + graphql/deleteallutil.go | 64 + graphql/escalation.go | 586 + graphql/handler.go | 317 + graphql/heartbeat.go | 106 + graphql/integrationkey.go | 181 + graphql/label.go | 75 + graphql/legacydb.go | 105 + graphql/limit.go | 65 + graphql/notificationrule.go | 155 + graphql/oncallassignment.go | 21 + graphql/rotation.go | 323 + graphql/rotationparticipant.go | 322 + graphql/schedule.go | 449 + graphql/scheduleassignment.go | 139 + graphql/schedulerule.go | 288 + graphql/scheduleshift.go | 38 + graphql/schema.go | 119 + graphql/service.go | 311 + graphql/user.go | 215 + graphql/userfavorite.go | 74 + graphql/useroverride.go | 157 + graphql/util.go | 75 + graphql2/clocktime.go | 22 + graphql2/cmtype.go | 25 + graphql2/gen.go | 6 + graphql2/generated.go | 16420 ++++++++++++++++ graphql2/gqlgen.yml | 64 + graphql2/graphqlapp/alert.go | 189 + graphql2/graphqlapp/app.go | 217 + graphql2/graphqlapp/config.go | 36 + graphql2/graphqlapp/contactmethod.go | 63 + graphql2/graphqlapp/dataloaders.go | 116 + graphql2/graphqlapp/escalationpolicy.go | 393 + graphql2/graphqlapp/integrationkey.go | 56 + graphql2/graphqlapp/label.go | 95 + 
graphql2/graphqlapp/mutation.go | 119 + graphql2/graphqlapp/notificationrule.go | 45 + graphql2/graphqlapp/oncall.go | 16 + graphql2/graphqlapp/playground.go | 43 + graphql2/graphqlapp/query.go | 65 + graphql2/graphqlapp/rotation.go | 353 + graphql2/graphqlapp/schedule.go | 250 + graphql2/graphqlapp/schedulerule.go | 99 + graphql2/graphqlapp/service.go | 205 + graphql2/graphqlapp/slack.go | 95 + graphql2/graphqlapp/target.go | 54 + graphql2/graphqlapp/timezone.go | 60 + graphql2/graphqlapp/tx.go | 47 + graphql2/graphqlapp/user.go | 129 + graphql2/graphqlapp/useroverride.go | 143 + graphql2/isotimestamp.go | 29 + graphql2/mapconfig.go | 198 + graphql2/models_gen.go | 522 + graphql2/schema.graphql | 755 + heartbeat/monitor.go | 40 + heartbeat/state.go | 31 + heartbeat/store.go | 162 + integrationkey/integrationkey.go | 25 + integrationkey/integrationkey_test.go | 36 + integrationkey/store.go | 205 + integrationkey/type.go | 33 + internal/generatemocks.go | 56 + internal/match/assignmentmatchers.go | 53 + keyring/keys.go | 43 + keyring/store.go | 596 + keyring/store_test.go | 51 + label/label.go | 23 + label/search.go | 189 + label/store.go | 170 + limit/error.go | 90 + limit/id.go | 38 + limit/limits.go | 13 + limit/store.go | 118 + lock/global.go | 11 + logging/errors.slide | 44 + mailgun/mailgun.go | 189 + migrate/inline_data_gen.go | 763 + migrate/inline_types_gen.go | 10 + migrate/migrate.go | 342 + migrate/migrations/20170426134008-init.sql | 217 + .../migrations/20170428154209-users-table.sql | 73 + .../20170502172843-user-settings.sql | 79 + .../20170503144542-remove-carrier.sql | 17 + .../20170503144821-remove-email-verified.sql | 8 + .../20170503154907-delay-minutes.sql | 6 + migrate/migrations/20170509154250-alerts.sql | 134 + ...170515120511-escalation-policy-actions.sql | 37 + .../20170515162554-user-notifications.sql | 146 + .../20170518142432-alert-assignments.sql | 12 + .../20170530135027-schedule-rotation.sql | 349 + 
.../migrations/20170605131920-twilio-sms.sql | 15 + .../20170605131942-twilio-voice.sql | 13 + .../migrations/20170607103917-throttle.sql | 19 + .../20170612101232-escalation-tweaks.sql | 51 + .../migrations/20170613122551-auth-token.sql | 13 + .../20170619123628-add-constraints.sql | 9 + .../20170619164449-bobby-tables.sql | 8 + .../20170620104459-contact-constraints.sql | 12 + ...0170621141923-notification-query-fixes.sql | 99 + .../20170621170744-add-country-code.sql | 7 + .../20170623151348-on-call-alert-distinct.sql | 31 + ...0170623155346-delete-keys-with-service.sql | 15 + ...20170629104138-escalation-policy-tweak.sql | 6 + ...095448-integration-to-integration-keys.sql | 27 + .../20170706102439-esc-zero-index.sql | 51 + ...170707135355-esc-cascade-steps-actions.sql | 12 + .../20170707153545-limit-cm-per-interval.sql | 5 + .../20170710155447-fix-escalations.sql | 24 + ...0712094434-notification-policy-updates.sql | 423 + ...0713113728-escalation-schema-hardening.sql | 34 + ...20170714155817-notification-rule-tweak.sql | 49 + .../20170717151241-remove-old-esc-columns.sql | 6 + ...70717151336-remove-old-service-columns.sql | 35 + .../20170717151358-remove-old-tables.sql | 27 + .../20170717152954-ids-to-uuids.sql | 346 + .../20170724162219-fix-alert-escalations.sql | 43 + ...725105059-rotations-shift-length-check.sql | 9 + .../20170725105905-fix-shift-calculation.sql | 150 + .../20170726141849-handle-missing-users.sql | 70 + ...6143800-no-oncall-for-future-rotations.sql | 152 + .../20170726155056-twilio-sms-errors.sql | 14 + .../20170726155351-twilio-voice-errors.sql | 14 + .../20170802114735-alert_logs_enum_update.sql | 31 + .../20170802160314-add-timezones.sql | 143 + ...0808110638-user-email-nullable-allowed.sql | 10 + ...0811110036-add-generic-integration-key.sql | 47 + ...70817102712-atomic-escalation-policies.sql | 206 + ...0170818135106-add-gravatar-col-to-user.sql | 8 + ...5124926-escalation-policy-step-reorder.sql | 42 + 
...42-adjust-notification-create-at-check.sql | 62 + ...0171027145352-dont-notify-disabled-cms.sql | 57 + .../20171030130758-ev3-drop-views.sql | 245 + .../20171030130759-ev3-schedule-rules.sql | 31 + ...20171030130800-ev3-notification-policy.sql | 50 + ...1030130801-ev3-escalation-policy-state.sql | 62 + .../20171030130802-ev3-rotations.sql | 16 + ...30130804-ev3-assign-schedule-rotations.sql | 43 + ...71030130806-ev3-add-rotation-ep-action.sql | 35 + .../20171030130810-ev3-notification-logs.sql | 35 + ...030130811-ev3-drop-ep-snapshot-trigger.sql | 30 + .../20171030130812-ev3-rotation-state.sql | 57 + .../20171030130813-ev3-throttle-locks.sql | 42 + ...171030150519-ev3-remove-status-trigger.sql | 36 + ...0171126093536-schedule-rule-processing.sql | 27 + .../20171201104359-structured-alert-logs.sql | 66 + .../20171201104433-add-alert-log-types.sql | 9 + ...71205125227-twilio-egress-sms-tracking.sql | 29 + ...211101108-twilio-egress-voice-tracking.sql | 31 + .../20171213141802-add-alert-source-email.sql | 6 + .../20171220113439-add-alert-dedup-keys.sql | 46 + .../20171221134500-limit-configuration.sql | 22 + ...20171221138101-notification-rule-limit.sql | 41 + .../20171221140906-contact-method-limit.sql | 42 + .../20171221142234-ep-step-limit.sql | 42 + .../20171221142553-ep-step-action-limit.sql | 42 + ...71221150317-rotation-participant-limit.sql | 42 + .../20171221150825-schedule-rule-limit.sql | 42 + .../20171221150955-integration-key-limit.sql | 42 + .../20171221151358-unacked-alert-limit.sql | 42 + ...62356-case-insenstive-name-constraints.sql | 17 + .../20180103113251-schedule-target-limit.sql | 45 + ...104114110-disable-process-alerts-queue.sql | 31 + ...180104122450-wait-alert-queue-finished.sql | 38 + .../20180104123517-outgoing-messages.sql | 70 + .../migrations/20180104124640-ncycle-tick.sql | 73 + ...04125444-twilio-sms-multiple-callbacks.sql | 12 + .../20180109114058-email-integration-key.sql | 6 + ...80110155110-alert-unique-dedup-service.sql | 57 + 
...80117110856-status-update-message-type.sql | 7 + .../20180117115123-alert-status-updates.sql | 86 + ...0180118112019-restrict-cm-to-same-user.sql | 68 + ...162030-heartbeat-auth-log-subject-type.sql | 6 + .../migrations/20180126162093-heartbeats.sql | 24 + ...20180126162144-heartbeat-auth-log-data.sql | 19 + .../20180130123755-heartbeat-limit-key.sql | 5 + .../20180130123852-heartbeat-limit.sql | 42 + .../20180201180221-add-verification-code.sql | 28 + ...80207113632-ep-step-number-consistency.sql | 137 + ...ation-participant-position-consistency.sql | 106 + ...216104945-alerts-split-summary-details.sql | 63 + ...228103159-schedule-overrides-limit-key.sql | 5 + .../20180228111204-schedule-overrides.sql | 93 + .../20180313152132-schedule-on-call-users.sql | 17 + .../20180315113303-strict-rotation-state.sql | 225 + .../20180320153326-npcycle-indexes.sql | 14 + .../20180321143255-ep-step-count.sql | 68 + .../20180321145054-strict-ep-state.sql | 103 + .../20180326154252-move-rotation-triggers.sql | 23 + .../20180330110116-move-ep-triggers.sql | 17 + .../20180403113645-fix-rot-part-delete.sql | 104 + .../20180417142940-region-processing.sql | 9 + ...17100033-clear-cycles-on-policy-change.sql | 24 + ...135700-policy-reassignment-trigger-fix.sql | 55 + migrate/migrations/20180517210000-auth2.sql | 86 + migrate/migrations/20180517220000-keyring.sql | 15 + .../migrations/20180517230000-auth-nonce.sql | 11 + .../20180521124533-UserFavorites.sql | 9 + ...80710110438-engine-processing-versions.sql | 34 + ...180720121433-increment-module-versions.sql | 20 + .../20180720121533-drop-dedup-trigger.sql | 28 + .../20180720121633-drop-description-col.sql | 46 + ...0180720121733-fix-svc-ep-state-trigger.sql | 88 + ...0180720121833-create-ep-state-on-alert.sql | 80 + ...80720121933-store-next-escalation-time.sql | 12 + .../20180720122033-ep-step-on-call.sql | 17 + .../20180720122133-clear-next-esc-on-ack.sql | 27 + ...20122233-drop-unique-cycles-constraint.sql | 9 + 
.../20180720122333-fix-schedule-index.sql | 13 + ...0122433-trig-alert-on-force-escalation.sql | 52 + .../20180720122533-drop-ep-state-np-trig.sql | 23 + ...0720122633-update-existing-escalations.sql | 42 + .../20180728150427-add-provider-msg-id.sql | 23 + .../20180803090205-drop-alert-assignments.sql | 17 + ...drop-alert-escalation-policy-snapshots.sql | 22 + .../20180803090405-drop-notification-logs.sql | 21 + .../20180803090505-drop-process-alerts.sql | 21 + .../20180803090605-drop-process-rotations.sql | 19 + .../20180803090705-drop-process-schedules.sql | 18 + ...20180803090805-drop-sent-notifications.sql | 27 + .../20180803090905-drop-throttle.sql | 15 + ...3091005-drop-user-contact-method-locks.sql | 23 + ...03110851-drop-twilio-egress-sms-status.sql | 30 + ...110859-drop-twilio-egress-voice-status.sql | 32 + .../20180806092512-incr-message-version.sql | 12 + ...0806102513-drop-twilio-voice-callbacks.sql | 13 + ...06102620-drop-user-notification-cycles.sql | 13 + .../20180806102708-drop-auth-github-users.sql | 30 + .../20180806102923-drop-auth-token-codes.sql | 11 + .../20180816094955-switchover-state.sql | 21 + .../migrations/20180816095055-add-row-ids.sql | 71 + .../migrations/20180816095155-change-log.sql | 399 + .../20180816164203-drop-end-time-check.sql | 7 + .../20180821150330-deferable-status-cm.sql | 8 + .../20180822153707-defer-rotation-state.sql | 10 + .../20180822153914-defer-ep-state.sql | 8 + ...0831132457-user-last-alert-log-indexes.sql | 15 + .../20180831132707-alerts-service-index.sql | 7 + .../20180831132743-np-cycle-alert-index.sql | 7 + .../20180831132927-alert-logs-index.sql | 21 + ...20180831143308-outgoing-messages-index.sql | 13 + ...180907111203-schedule-rule-endtime-fix.sql | 34 + .../20180918102226-add-service-label.sql | 14 + ...181004032148-labels-switchover-trigger.sql | 15 + ...181004145558-fix-deleting-participants.sql | 91 + .../20181008111401-twilio-sms-short-reply.sql | 52 + .../20181018131939-fix-rotation-deletions.sql | 99 
+ .../20181107133329-notification-channels.sql | 20 + .../20181107155035-nc-id-to-ep-action.sql | 19 + ...20181107155229-om-notification-channel.sql | 18 + ...90117130422-notif-chan-engine-versions.sql | 21 + .../20190129110250-add-cleanup-module.sql | 6 + .../20190201104727-alert-logs-channel.sql | 11 + .../20190201142137-drop-sub-constraint.sql | 25 + .../20190225112925-config-table.sql | 16 + .../20190312153204-slack-api-change.sql | 24 + .../20190313125552-slack-user-link.sql | 11 + .../migrations/20190404105850-nc-no-meta.sql | 12 + .../20190517144224-trigger-config-sync.sql | 21 + notification/alert.go | 19 + notification/alertstatus.go | 17 + notification/dest.go | 26 + notification/desttype_string.go | 26 + notification/manager.go | 234 + notification/message.go | 25 + notification/messagestatus.go | 69 + notification/messagetype_string.go | 26 + notification/namedsender.go | 16 + notification/notifier.go | 52 + notification/result_string.go | 25 + notification/slack/cache.go | 43 + notification/slack/channel.go | 313 + notification/slack/config.go | 21 + notification/slack/throttle.go | 50 + notification/store.go | 306 + notification/stubsender.go | 26 + notification/testnotification.go | 16 + notification/twilio/alertsms.go | 122 + notification/twilio/alertsms_test.go | 131 + notification/twilio/call.go | 90 + notification/twilio/client.go | 310 + notification/twilio/config.go | 21 + notification/twilio/dbban.go | 66 + notification/twilio/dbsms.go | 136 + notification/twilio/exception.go | 15 + notification/twilio/headerhack.go | 44 + notification/twilio/message.go | 101 + notification/twilio/signature.go | 38 + notification/twilio/signature_test.go | 33 + notification/twilio/sms.go | 386 + notification/twilio/validation.go | 89 + notification/twilio/voice.go | 702 + notification/verification.go | 17 + notificationchannel/channel.go | 33 + notificationchannel/store.go | 130 + notificationchannel/type.go | 34 + oncall/pool.go | 41 + oncall/state.go | 237 + 
oncall/state_test.go | 610 + oncall/store.go | 290 + override/override.go | 60 + override/search.go | 173 + override/store.go | 284 + permission/checker.go | 158 + permission/context.go | 215 + permission/context_test.go | 76 + permission/contextkey.go | 15 + permission/error.go | 65 + permission/permission.go | 12 + permission/permission_test.go | 20 + permission/role.go | 44 + permission/source.go | 44 + permission/sourcetype_string.go | 28 + permission/store.go | 1 + remotemonitor/config.go | 26 + remotemonitor/doc.go | 18 + remotemonitor/instance.go | 90 + remotemonitor/monitor.go | 184 + remotemonitor/requestid.go | 21 + remotemonitor/sms.go | 80 + retry/do.go | 81 + retry/temporary.go | 59 + schedule/rotation/participant.go | 29 + schedule/rotation/rotation.go | 114 + schedule/rotation/rotation_test.go | 249 + schedule/rotation/search.go | 128 + schedule/rotation/state.go | 24 + schedule/rotation/store.go | 904 + schedule/rotation/type.go | 74 + schedule/rule/clock.go | 99 + schedule/rule/rule.go | 224 + schedule/rule/rule_test.go | 402 + schedule/rule/store.go | 477 + schedule/rule/weekdayfilter.go | 129 + schedule/rule/weekdayfilter_test.go | 90 + schedule/schedule.go | 30 + schedule/schedule_test.go | 30 + schedule/search.go | 128 + schedule/shiftcalc/overrides.go | 90 + schedule/shiftcalc/overrides_test.go | 126 + schedule/shiftcalc/shiftcalc.go | 502 + schedule/shiftcalc/shiftcalc_test.go | 120 + schedule/store.go | 269 + search/config.go | 8 + search/cursor.go | 29 + search/escape.go | 12 + search/render.go | 45 + service/legacysearch.go | 173 + service/search.go | 224 + service/service.go | 37 + service/service_test.go | 36 + service/store.go | 332 + smoketest/README.md | 34 + smoketest/addrules_test.go | 98 + smoketest/dedupnotifications_test.go | 58 + smoketest/deleteescalationpolicy_test.go | 45 + smoketest/deleterotation_test.go | 50 + smoketest/escalation_test.go | 61 + smoketest/escalationgap_test.go | 47 + 
smoketest/escalationnotification_test.go | 78 + smoketest/genericapi_test.go | 64 + smoketest/genericapiclose_test.go | 91 + smoketest/genericapidedup_test.go | 105 + smoketest/grafana_test.go | 69 + smoketest/graphql2users_test.go | 103 + smoketest/graphqlalert_test.go | 204 + smoketest/graphqlcreateschedule_test.go | 87 + ...aphqlcreatescheduledefaultrotation_test.go | 94 + smoketest/graphqlmultiplealerts_test.go | 135 + smoketest/graphqloncall_test.go | 188 + smoketest/graphqloncallassignments_test.go | 490 + smoketest/graphqlservicelabels_test.go | 58 + smoketest/graphqlupdaterotation_test.go | 123 + smoketest/graphqluserfavorites_test.go | 122 + smoketest/graphqlusers_test.go | 58 + smoketest/harness/backend.go | 70 + smoketest/harness/datagen.go | 110 + smoketest/harness/graphql.go | 95 + smoketest/harness/harness.go | 645 + smoketest/harness/harness_notunix.go | 5 + smoketest/harness/harness_unix.go | 9 + smoketest/harness/slack.go | 116 + smoketest/harness/twilio.go | 427 + smoketest/heartbeat_test.go | 75 + smoketest/inprogress_test.go | 113 + smoketest/listalerts_test.go | 63 + .../manualescalationnotification_test.go | 60 + smoketest/migrations_test.go | 678 + smoketest/missinguser_test.go | 100 + smoketest/multistepnotification_test.go | 61 + smoketest/multiuser_test.go | 59 + smoketest/policyreassignment_test.go | 97 + smoketest/postcyclerules_test.go | 71 + smoketest/prioritization_test.go | 76 + smoketest/rotationdaily_test.go | 72 + smoketest/rotationdst_test.go | 100 + smoketest/rotationgap_test.go | 64 + smoketest/rotationhourly_test.go | 77 + smoketest/rotationweekly_test.go | 73 + smoketest/rotationwrap_test.go | 74 + smoketest/schedulerule_test.go | 77 + smoketest/simplenotification_india_test.go | 68 + smoketest/simplenotification_test.go | 54 + smoketest/slackaddtoepstep_test.go | 57 + smoketest/slackchannels_test.go | 65 + smoketest/slacknotification_test.go | 37 + smoketest/statusupdates_test.go | 95 + smoketest/systemlimits_test.go | 407 + 
smoketest/twilioenablebysms_test.go | 77 + smoketest/twilioenablebyvoice_test.go | 77 + smoketest/twiliosmsack_test.go | 59 + smoketest/twiliosmsclose_test.go | 59 + smoketest/twiliosmsfailure_test.go | 53 + smoketest/twiliosmsreplycode_test.go | 72 + smoketest/twiliosmsreplylast_test.go | 60 + smoketest/twiliosmsstop_test.go | 57 + smoketest/twiliosmstrailingspace_test.go | 53 + smoketest/twiliosmsverification_test.go | 100 + smoketest/twiliotestsms_test.go | 46 + smoketest/twiliotestvoice_test.go | 47 + smoketest/twiliovoiceack_test.go | 58 + smoketest/twiliovoiceclose_test.go | 58 + smoketest/twiliovoiceemptymessage_test.go | 50 + smoketest/twiliovoicefailure_test.go | 51 + smoketest/twiliovoicestop_test.go | 65 + smoketest/twiliovoiceverification_test.go | 100 + smoketest/upload.sh | 37 + smoketest/usernotificationcycles_test.go | 70 + sqltrace/attributes.go | 21 + sqltrace/conn.go | 153 + sqltrace/connector.go | 23 + sqltrace/driver.go | 50 + sqltrace/rows.go | 20 + sqltrace/simpleconnector.go | 21 + sqltrace/stmt.go | 90 + sqltrace/tx.go | 31 + switchover/dbstate.go | 51 + switchover/dbsync/ctxshell.go | 75 + switchover/dbsync/diffsync.go | 154 + switchover/dbsync/initsync.go | 101 + switchover/dbsync/listen.go | 50 + switchover/dbsync/sequences.go | 57 + switchover/dbsync/shell.go | 432 + switchover/dbsync/status.go | 104 + switchover/dbsync/sync.go | 337 + switchover/dbsync/table.go | 217 + switchover/deadlineconfig.go | 106 + switchover/handler.go | 166 + switchover/mainloop.go | 207 + switchover/notify.go | 105 + switchover/state.go | 27 + switchover/status.go | 81 + timezone/search.go | 130 + timezone/store.go | 16 + user/authsubject.go | 30 + user/contactmethod/contactmethod.go | 45 + user/contactmethod/contactmethod_test.go | 36 + user/contactmethod/store.go | 359 + user/contactmethod/type.go | 54 + user/favorite/store.go | 157 + user/notificationrule/notificationrule.go | 33 + .../notificationrule/notificationrule_test.go | 36 + 
user/notificationrule/store.go | 258 + user/search.go | 121 + user/store.go | 454 + user/user.go | 131 + user/user_test.go | 37 + util/alignedticker.go | 75 + util/contextcache.go | 130 + util/contextcache_test.go | 89 + util/contextroundtripper.go | 28 + util/contextwaitgroup.go | 85 + util/errutil/httperror.go | 76 + util/errutil/maperror.go | 70 + util/errutil/scruberror.go | 30 + util/loadlocation.go | 30 + util/log/fields.go | 90 + util/log/fields_test.go | 62 + util/log/log.go | 151 + util/log/sqlhighlight.go | 15 + util/log/terminalformatter.go | 86 + util/sqlprepare.go | 74 + util/util.go | 29 + util/util_test.go | 28 + validation/fieldvalidationerror.go | 105 + validation/validate/email.go | 25 + validation/validate/idname.go | 37 + validation/validate/idname_test.go | 38 + validation/validate/labelkey.go | 79 + validation/validate/labelkey_test.go | 26 + validation/validate/labelvalue.go | 42 + validation/validate/labelvalue_test.go | 26 + validation/validate/many.go | 37 + validation/validate/many_test.go | 36 + validation/validate/name.go | 79 + validation/validate/name_test.go | 59 + validation/validate/oneof.go | 23 + validation/validate/oneof_test.go | 21 + validation/validate/phone.go | 38 + validation/validate/phone_test.go | 41 + validation/validate/range.go | 22 + validation/validate/subjectid.go | 33 + validation/validate/text.go | 84 + validation/validate/url.go | 31 + validation/validate/username.go | 29 + validation/validate/uuid.go | 45 + web/.gitignore | 2 + web/bundle.go | 3 + web/handler.go | 94 + web/inline_types_gen.go | 10 + web/src/.editorconfig | 9 + web/src/.eslintignore | 3 + web/src/.eslintrc.js | 26 + web/src/.gitignore | 20 + web/src/.gqlconfig | 14 + web/src/.prettierignore | 3 + web/src/.stylelintrc | 3 + web/src/app/actions/alerts.js | 34 + web/src/app/actions/auth.js | 17 + web/src/app/actions/index.js | 4 + web/src/app/actions/main.js | 78 + web/src/app/actions/service.js | 8 + web/src/app/admin/AdminConfig.js | 174 + 
web/src/app/admin/AdminConfigSection.js | 97 + web/src/app/admin/AdminConfirmDialog.js | 103 + web/src/app/admin/AdminFieldComponents.js | 95 + web/src/app/admin/AdminRouter.js | 22 + web/src/app/admin/index.js | 4 + web/src/app/alerts/AlertRouter.js | 18 + web/src/app/alerts/components/AlertDetails.js | 474 + web/src/app/alerts/components/AlertForm.js | 223 + web/src/app/alerts/components/AlertsList.js | 370 + .../alerts/components/AlertsListControls.js | 40 + .../components/AlertsListDataWrapper.js | 169 + .../app/alerts/components/AlertsListFilter.js | 213 + .../components/CheckedAlertsFormControl.js | 397 + .../app/alerts/components/CreateAlertFab.js | 87 + .../alerts/components/UpdateAlertsSnackbar.js | 95 + web/src/app/alerts/pages/AlertDetailPage.js | 77 + web/src/app/alerts/pages/AlertsIndexPage.js | 20 + web/src/app/alerts/queries/AlertsListQuery.js | 61 + web/src/app/apollo.js | 135 + web/src/app/config/index.js | 11 + .../components/ContactMethodForm.js | 409 + .../components/VerificationForm.js | 200 + web/src/app/details/DetailsPage.js | 149 + web/src/app/dialogs/FormDialog.js | 218 + .../dialogs/components/ApolloFormDialog.js | 188 + .../dialogs/components/ConfirmationDialog.js | 180 + .../dialogs/components/DialogContentError.js | 44 + .../dialogs/components/DialogTitleWrapper.js | 81 + .../app/dialogs/components/DropDownMenu.js | 64 + web/src/app/dialogs/components/FormDialog.js | 197 + web/src/app/documentation/IntegrationKeys.md | 72 + .../components/IntegrationKeyAPI.js | 45 + web/src/app/error-pages/Errors.js | 52 + web/src/app/error-pages/index.js | 1 + .../escalation-policies/PolicyCreateDialog.js | 91 + .../escalation-policies/PolicyDeleteDialog.js | 63 + .../app/escalation-policies/PolicyDetails.js | 109 + .../escalation-policies/PolicyEditDialog.js | 113 + web/src/app/escalation-policies/PolicyForm.js | 83 + .../app/escalation-policies/PolicyRouter.js | 65 + .../escalation-policies/PolicyServicesCard.js | 41 + .../PolicyServicesQuery.js | 
37 + web/src/app/escalation-policies/PolicyStep.js | 207 + .../PolicyStepCreateDialog.js | 131 + .../PolicyStepDeleteDialog.js | 108 + .../PolicyStepEditDialog.js | 111 + .../app/escalation-policies/PolicyStepForm.js | 272 + .../escalation-policies/PolicyStepsCard.js | 280 + .../escalation-policies/PolicyStepsQuery.js | 47 + web/src/app/forms/Form.js | 50 + web/src/app/forms/FormContainer.js | 136 + web/src/app/forms/FormField.js | 196 + web/src/app/forms/README.md | 57 + web/src/app/forms/context.js | 16 + web/src/app/forms/index.js | 3 + web/src/app/history.js | 3 + web/src/app/icons/components/Icons.js | 82 + web/src/app/icons/index.js | 1 + web/src/app/index.js | 63 + web/src/app/links/RotationLink.js | 6 + web/src/app/links/ScheduleLink.js | 6 + web/src/app/links/ServiceLink.js | 6 + web/src/app/links/UserLink.js | 6 + web/src/app/links/index.js | 4 + web/src/app/lists/CreateFAB.js | 24 + web/src/app/lists/FlatList.js | 219 + web/src/app/lists/PaginatedList.js | 336 + web/src/app/lists/QueryList.js | 131 + web/src/app/lists/SimpleListPage.js | 35 + .../app/lists/components/BaseActionsMenu.js | 75 + web/src/app/lists/index.js | 1 + .../app/loading/components/LoadingButton.js | 54 + web/src/app/loading/components/Spinner.js | 58 + web/src/app/main/ErrorBoundary.js | 20 + web/src/app/main/MobileSideBar.js | 33 + web/src/app/main/NewApp.js | 137 + web/src/app/main/URLErrorDialog.js | 41 + web/src/app/main/WideSideBar.js | 36 + web/src/app/main/components/Login.js | 273 + web/src/app/main/components/NewUserSetup.js | 78 + .../app/main/components/SideBarDrawerList.js | 209 + web/src/app/main/components/ToolbarAction.js | 71 + web/src/app/main/components/ToolbarTitle.js | 205 + web/src/app/main/routes.js | 115 + web/src/app/mui-pickers.js | 11 + web/src/app/mui.js | 29 + .../components/CreateNotificationRuleForm.js | 238 + web/src/app/public/favicon-128.png | Bin 0 -> 8900 bytes web/src/app/public/favicon-16.png | Bin 0 -> 732 bytes web/src/app/public/favicon-192.png 
| Bin 0 -> 15049 bytes web/src/app/public/favicon-32.png | Bin 0 -> 1641 bytes web/src/app/public/favicon-64.png | Bin 0 -> 3876 bytes web/src/app/public/favicon.ico | Bin 0 -> 254558 bytes .../app/public/goalert-alt-logo-scaled.png | Bin 0 -> 5616 bytes web/src/app/public/goalert-alt-logo.png | Bin 0 -> 177876 bytes web/src/app/public/goalert-logo-scaled.png | Bin 0 -> 13636 bytes web/src/app/public/goalert-logo-scaled.webp | Bin 0 -> 3726 bytes .../app/public/goalert-logo-scaled@1.5.webp | Bin 0 -> 6070 bytes web/src/app/public/goalert-logo-scaled@2.png | Bin 0 -> 14448 bytes web/src/app/public/goalert-logo-scaled@2.webp | Bin 0 -> 8056 bytes web/src/app/public/goalert-logo.png | Bin 0 -> 232146 bytes web/src/app/public/slack.svg | 33 + web/src/app/public/slack_monochrome_black.svg | 27 + web/src/app/reducers/alerts.js | 32 + web/src/app/reducers/auth.js | 14 + web/src/app/reducers/index.js | 14 + web/src/app/reducers/main.js | 23 + web/src/app/reduxStore.js | 16 + web/src/app/rhl.js | 4 + .../app/rotations/RotationAddUserDialog.js | 83 + web/src/app/rotations/RotationCreateDialog.js | 82 + web/src/app/rotations/RotationDeleteDialog.js | 83 + web/src/app/rotations/RotationDetails.js | 112 + web/src/app/rotations/RotationEditDialog.js | 102 + web/src/app/rotations/RotationForm.js | 162 + web/src/app/rotations/RotationRouter.js | 53 + .../app/rotations/RotationSetActiveDialog.js | 82 + .../app/rotations/RotationUserDeleteDialog.js | 84 + web/src/app/rotations/RotationUserList.js | 202 + web/src/app/rotations/RotationUserListItem.js | 88 + web/src/app/rotations/UserForm.js | 35 + web/src/app/rotations/util.js | 112 + web/src/app/rotations/util.test.js | 161 + web/src/app/schedules/CalendarEventWrapper.js | 166 + web/src/app/schedules/CalendarToolbar.js | 188 + .../app/schedules/ScheduleAssignedToList.js | 49 + web/src/app/schedules/ScheduleCalendar.js | 274 + .../app/schedules/ScheduleCalendarQuery.js | 81 + web/src/app/schedules/ScheduleCreateDialog.js | 73 + 
web/src/app/schedules/ScheduleDeleteDialog.js | 82 + web/src/app/schedules/ScheduleDetails.js | 151 + web/src/app/schedules/ScheduleEditDialog.js | 96 + web/src/app/schedules/ScheduleForm.js | 58 + .../app/schedules/ScheduleNewOverrideFAB.js | 32 + .../schedules/ScheduleOverrideCreateDialog.js | 122 + .../schedules/ScheduleOverrideDeleteDialog.js | 109 + .../schedules/ScheduleOverrideEditDialog.js | 115 + web/src/app/schedules/ScheduleOverrideForm.js | 200 + web/src/app/schedules/ScheduleOverrideList.js | 196 + web/src/app/schedules/ScheduleRouter.js | 87 + .../app/schedules/ScheduleRuleCreateDialog.js | 85 + .../app/schedules/ScheduleRuleDeleteDialog.js | 93 + .../app/schedules/ScheduleRuleEditDialog.js | 123 + web/src/app/schedules/ScheduleRuleForm.js | 345 + web/src/app/schedules/ScheduleRuleList.js | 161 + web/src/app/schedules/ScheduleShiftList.js | 370 + web/src/app/schedules/ScheduleTZFilter.js | 69 + web/src/app/schedules/util.js | 161 + web/src/app/schedules/util.test.js | 112 + .../app/selection/EscalationPolicySelect.js | 29 + web/src/app/selection/LabelKeySelect.js | 30 + web/src/app/selection/MaterialSelect.js | 98 + .../app/selection/MaterialSelectComponents.js | 171 + web/src/app/selection/QuerySelect.js | 306 + web/src/app/selection/RotationSelect.js | 29 + web/src/app/selection/ScheduleSelect.js | 29 + web/src/app/selection/ServiceSelect.js | 39 + web/src/app/selection/SlackChannelSelect.js | 50 + web/src/app/selection/TimeZoneSelect.js | 26 + web/src/app/selection/UserSelect.js | 29 + web/src/app/selection/index.js | 8 + web/src/app/selectors/index.js | 1 + web/src/app/selectors/url.js | 50 + web/src/app/selectors/url.test.js | 70 + .../services/IntegrationKeyCreateDialog.js | 104 + .../services/IntegrationKeyDeleteDialog.js | 118 + web/src/app/services/IntegrationKeyForm.js | 86 + web/src/app/services/IntegrationKeyList.js | 210 + web/src/app/services/ServiceCreateDialog.js | 131 + web/src/app/services/ServiceDeleteDialog.js | 145 + 
web/src/app/services/ServiceDetails.js | 147 + web/src/app/services/ServiceEditDialog.js | 120 + web/src/app/services/ServiceForm.js | 84 + .../app/services/ServiceLabelCreateDialog.js | 107 + .../app/services/ServiceLabelDeleteDialog.js | 99 + .../app/services/ServiceLabelEditDialog.js | 127 + web/src/app/services/ServiceLabelForm.js | 77 + web/src/app/services/ServiceLabelList.js | 125 + web/src/app/services/ServiceOnCallDisplay.js | 36 + web/src/app/services/ServiceRouter.js | 87 + .../services/components/OnCallForService.js | 94 + .../app/services/components/ServiceAlerts.js | 110 + .../services/components/SetFavoriteButton.js | 109 + web/src/app/styles/base/elements.scss | 136 + web/src/app/styles/base/theme.scss | 1 + web/src/app/styles/base/variables.scss | 6 + web/src/app/styles/index.js | 2 + web/src/app/styles/materialStyles.js | 118 + web/src/app/templates/index.html | 51 + .../users/UserContactMethodCreateDialog.js | 84 + .../users/UserContactMethodDeleteDialog.js | 48 + .../app/users/UserContactMethodEditDialog.js | 99 + web/src/app/users/UserContactMethodForm.js | 93 + web/src/app/users/UserContactMethodList.js | 116 + web/src/app/users/UserContactMethodSelect.js | 68 + web/src/app/users/UserDetails.js | 160 + .../users/UserNotificationRuleCreateDialog.js | 78 + .../users/UserNotificationRuleDeleteDialog.js | 47 + web/src/app/users/UserNotificationRuleForm.js | 58 + web/src/app/users/UserNotificationRuleList.js | 79 + web/src/app/users/UserOnCallAssignmentList.js | 98 + web/src/app/users/UserRouter.js | 95 + .../app/users/UserStatusUpdatePreference.js | 76 + web/src/app/users/util.js | 58 + web/src/app/users/util.test.js | 64 + web/src/app/util/Chips.js | 222 + web/src/app/util/CountDown.js | 134 + web/src/app/util/Diff.js | 97 + web/src/app/util/FilterContainer.js | 112 + web/src/app/util/GoogleAnalytics.js | 68 + web/src/app/util/Markdown.js | 33 + web/src/app/util/MountWatcher.js | 23 + web/src/app/util/Options.js | 275 + 
web/src/app/util/OtherActions.js | 78 + web/src/app/util/OtherActionsDesktop.js | 65 + web/src/app/util/OtherActionsMobile.js | 51 + web/src/app/util/PageActions.js | 132 + web/src/app/util/Query.js | 160 + web/src/app/util/RequireConfig.js | 146 + web/src/app/util/Search.js | 134 + web/src/app/util/SpeedDial.js | 74 + web/src/app/util/Transitions.js | 15 + web/src/app/util/avatar/BaseAvatar.js | 69 + web/src/app/util/avatar/index.js | 2 + web/src/app/util/avatar/types.js | 53 + web/src/app/util/copyToClipboard.js | 44 + web/src/app/util/debug.js | 6 + web/src/app/util/errutil.js | 75 + web/src/app/util/gracefulUnmount.js | 111 + web/src/app/util/graphql.js | 208 + web/src/app/util/graphql.test.js | 220 + web/src/app/util/joinURL.js | 16 + web/src/app/util/joinURL.test.js | 24 + web/src/app/util/on-demand.js | 67 + web/src/app/util/poll_intervals.js | 2 + web/src/app/util/propTypes.js | 23 + web/src/app/util/query_param.js | 62 + web/src/app/util/safeURL.js | 12 + web/src/app/util/safeURL.test.js | 63 + web/src/app/util/statusStyles.js | 14 + web/src/app/util/timeFormat.js | 31 + web/src/app/util/toTitleCase.js | 5 + web/src/app/wizard/WizardForm.js | 202 + web/src/app/wizard/WizardRouter.js | 267 + web/src/app/wizard/WizardScheduleForm.js | 318 + web/src/app/wizard/propTypes.js | 27 + web/src/app/wizard/util.js | 103 + web/src/app/wizard/util.test.js | 229 + web/src/app/wizard/utilTestData.js | 143 + web/src/babel.config.js | 10 + web/src/codemods/README.md | 15 + web/src/codemods/jsimports.js | 101 + web/src/codemods/material-core.js | 45 + web/src/cypress.json | 10 + web/src/cypress/README.md | 17 + web/src/cypress/fixtures/example.json | 5 + web/src/cypress/fixtures/profile.json | 8 + web/src/cypress/fixtures/profileAdmin.json | 8 + web/src/cypress/fixtures/users.json | 20 + web/src/cypress/integration/admin.ts | 195 + web/src/cypress/integration/alerts.ts | 308 + web/src/cypress/integration/auth.ts | 38 + .../cypress/integration/escalationPolicies.ts | 188 + 
.../integration/escalationPolicySteps.ts | 267 + web/src/cypress/integration/markdown.ts | 79 + web/src/cypress/integration/pagination.ts | 132 + web/src/cypress/integration/profile.ts | 156 + web/src/cypress/integration/rotations.ts | 262 + .../cypress/integration/scheduleCalendar.ts | 167 + web/src/cypress/integration/schedules.ts | 341 + web/src/cypress/integration/services.ts | 451 + web/src/cypress/integration/sidebar.ts | 49 + web/src/cypress/integration/wizard.ts | 243 + web/src/cypress/plugins/index.js | 44 + web/src/cypress/support/alert.ts | 75 + web/src/cypress/support/commands.js | 25 + web/src/cypress/support/config.ts | 143 + web/src/cypress/support/ep.ts | 131 + web/src/cypress/support/fail-fast.ts | 17 + web/src/cypress/support/graphql.ts | 56 + web/src/cypress/support/index.ts | 40 + web/src/cypress/support/login.ts | 84 + web/src/cypress/support/menu.ts | 41 + .../cypress/support/navitage-to-and-from.ts | 63 + web/src/cypress/support/page-action.ts | 15 + web/src/cypress/support/page-fab.ts | 22 + web/src/cypress/support/page-nav.ts | 26 + web/src/cypress/support/page-search.ts | 33 + web/src/cypress/support/profile.ts | 224 + web/src/cypress/support/rotation.ts | 117 + web/src/cypress/support/schedule.ts | 196 + web/src/cypress/support/select-by-label.ts | 94 + web/src/cypress/support/service.ts | 165 + web/src/cypress/support/util.ts | 41 + web/src/cypress/tsconfig.json | 10 + web/src/graphql.config.json | 31 + web/src/jsconfig.json | 8 + web/src/package.json | 181 + web/src/postcss.config.js | 5 + web/src/prettier.config.js | 7 + web/src/scripts/insert-users.js | 33 + web/src/scripts/smoketest-prep.sh | 62 + web/src/webpack.config.js | 129 + web/src/webpack.dll.config.js | 94 + web/src/webpack.prod.config.js | 138 + web/src/yarn.lock | 11343 +++++++++++ 1036 files changed, 127111 insertions(+) create mode 100644 .gitignore create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 CONTRIBUTORS create mode 100644 
LICENSE.md create mode 100644 Makefile create mode 100644 README.md create mode 100644 alert/alert.go create mode 100644 alert/alert_test.go create mode 100644 alert/dedup.go create mode 100644 alert/legacysearch.go create mode 100644 alert/log.go create mode 100644 alert/log/entry.go create mode 100644 alert/log/legacylogs.go create mode 100644 alert/log/meta.go create mode 100644 alert/log/rawentry.go create mode 100644 alert/log/rawjson.go create mode 100644 alert/log/store.go create mode 100644 alert/log/subject.go create mode 100644 alert/log/type.go create mode 100644 alert/logentryfetcher.go create mode 100644 alert/search.go create mode 100644 alert/source.go create mode 100644 alert/state.go create mode 100644 alert/status.go create mode 100644 alert/store.go create mode 100644 alert/summary.go create mode 100644 app/app.go create mode 100644 app/appconfig.go create mode 100644 app/clusterexporter.go create mode 100644 app/cmd.go create mode 100644 app/contextlocker.go create mode 100644 app/cooldown.go create mode 100644 app/getsetconfig.go create mode 100644 app/healthcheck.go create mode 100644 app/initauth.go create mode 100644 app/initengine.go create mode 100644 app/initgraphql.go create mode 100644 app/inithttp.go create mode 100644 app/inithttputil.go create mode 100644 app/inithttputil_test.go create mode 100644 app/initslack.go create mode 100644 app/initstores.go create mode 100644 app/inittwilio.go create mode 100644 app/lifecycle/manager.go create mode 100644 app/lifecycle/manager_test.go create mode 100644 app/lifecycle/pauseable.go create mode 100644 app/lifecycle/pauseresumer.go create mode 100644 app/lifecycle/pauseresumer_test.go create mode 100644 app/listenevents.go create mode 100644 app/listenstatus.go create mode 100644 app/logexporter.go create mode 100644 app/middleware.go create mode 100644 app/middlewaregzip.go create mode 100644 app/pause.go create mode 100644 app/recoverexporter.go create mode 100644 app/runapp.go create mode 
100644 app/shutdown.go create mode 100644 app/shutdownsignals_unix.go create mode 100644 app/startup.go create mode 100644 app/tracing.go create mode 100644 app/trigger.go create mode 100644 app/version.go create mode 100644 assignment/assignment.go create mode 100644 assignment/doc.go create mode 100644 assignment/source.go create mode 100644 assignment/srctype.go create mode 100644 assignment/srctype_string.go create mode 100644 assignment/target.go create mode 100644 assignment/targettype.go create mode 100644 assignment/targettype_string.go create mode 100644 auth/basic/config.go create mode 100644 auth/basic/db.go create mode 100644 auth/basic/doc.go create mode 100644 auth/basic/identityprovider.go create mode 100644 auth/basic/provider.go create mode 100644 auth/cookies.go create mode 100644 auth/faildelay.go create mode 100644 auth/gettoken.go create mode 100644 auth/github/config.go create mode 100644 auth/github/doc.go create mode 100644 auth/github/identityprovider.go create mode 100644 auth/github/provider.go create mode 100644 auth/handler.go create mode 100644 auth/identityprovider.go create mode 100644 auth/nonce/nonce.go create mode 100644 auth/oidc/config.go create mode 100644 auth/oidc/identityprovider.go create mode 100644 auth/routeinfo.go create mode 100644 cmd/goalert/main.go create mode 100644 config/config.go create mode 100644 config/context.go create mode 100644 config/mergejson.go create mode 100644 config/mergejson_test.go create mode 100644 config/source.go create mode 100644 config/store.go create mode 100644 dataloader/alertloader.go create mode 100644 dataloader/cmloader.go create mode 100644 dataloader/loader.go create mode 100644 dataloader/loader_test.go create mode 100644 dataloader/policyloader.go create mode 100644 dataloader/rotationloader.go create mode 100644 dataloader/scheduleloader.go create mode 100644 dataloader/serviceloader.go create mode 100644 dataloader/userloader.go create mode 100644 
devtools/ci/dockerfiles/all-in-one/Dockerfile create mode 100644 devtools/ci/dockerfiles/all-in-one/README.md create mode 100755 devtools/ci/dockerfiles/all-in-one/start.sh create mode 100644 devtools/ci/dockerfiles/build-env/Dockerfile create mode 100644 devtools/ci/tasks/build-debug.yml create mode 100644 devtools/ci/tasks/build-test.yml create mode 100755 devtools/ci/tasks/scripts/build-test.sh create mode 100755 devtools/ci/tasks/scripts/codecheck.sh create mode 100644 devtools/configparams/main.go create mode 100644 devtools/devtools.go create mode 100644 devtools/fetchcounts/main.go create mode 100644 devtools/gqlgen/gqlgen.go create mode 100644 devtools/inliner/linebreaker.go create mode 100644 devtools/inliner/main.go create mode 100644 devtools/mockslack/api.go create mode 100644 devtools/mockslack/authrevoke.go create mode 100644 devtools/mockslack/channelscreate.go create mode 100644 devtools/mockslack/chatpostmessage.go create mode 100644 devtools/mockslack/cmd/mockslack/main.go create mode 100644 devtools/mockslack/conversationsinfo.go create mode 100644 devtools/mockslack/conversationslist.go create mode 100644 devtools/mockslack/conversationslist_test.go create mode 100644 devtools/mockslack/groupscreate.go create mode 100644 devtools/mockslack/http.go create mode 100644 devtools/mockslack/login.go create mode 100644 devtools/mockslack/oauthaccess.go create mode 100644 devtools/mockslack/oauthauthorize.go create mode 100644 devtools/mockslack/permissions.go create mode 100644 devtools/mockslack/server.go create mode 100644 devtools/mockslack/state.go create mode 100644 devtools/mockslack/user.go create mode 100644 devtools/mockslack/util.go create mode 100644 devtools/mockslack/validate.go create mode 100644 devtools/mocktwilio/server.go create mode 100644 devtools/mocktwilio/sms.go create mode 100644 devtools/mocktwilio/voicecall.go create mode 100644 devtools/ordermigrations/main.go create mode 100644 devtools/resetdb/main.go create mode 100644 
devtools/runjson/ci-cypress.json create mode 100644 devtools/runjson/localdev-cypress-prod.json create mode 100644 devtools/runjson/localdev-cypress.json create mode 100644 devtools/runjson/localdev.json create mode 100644 devtools/runjson/main.go create mode 100644 devtools/runjson/run.go create mode 100644 devtools/runjson/task.go create mode 100644 devtools/simpleproxy/main.go create mode 100644 devtools/tools.go create mode 100644 devtools/waitfor/main.go create mode 100644 engine/backend.go create mode 100644 engine/callback.go create mode 100644 engine/cleanupmanager/db.go create mode 100644 engine/cleanupmanager/update.go create mode 100644 engine/config.go create mode 100644 engine/engine.go create mode 100644 engine/escalationmanager/db.go create mode 100644 engine/escalationmanager/update.go create mode 100644 engine/heartbeatmanager/db.go create mode 100644 engine/heartbeatmanager/process.go create mode 100644 engine/message/config.go create mode 100644 engine/message/db.go create mode 100644 engine/message/message.go create mode 100644 engine/message/row.go create mode 100644 engine/message/status.go create mode 100644 engine/message/type.go create mode 100644 engine/npcyclemanager/db.go create mode 100644 engine/npcyclemanager/update.go create mode 100644 engine/processinglock/config.go create mode 100644 engine/processinglock/conn.go create mode 100644 engine/processinglock/error.go create mode 100644 engine/processinglock/lock.go create mode 100644 engine/processinglock/type.go create mode 100644 engine/resolver/resolver.go create mode 100644 engine/resolver/state.go create mode 100644 engine/rotationmanager/advance.go create mode 100644 engine/rotationmanager/db.go create mode 100644 engine/rotationmanager/update.go create mode 100644 engine/schedulemanager/db.go create mode 100644 engine/schedulemanager/update.go create mode 100644 engine/sendmessage.go create mode 100644 engine/statusupdatemanager/db.go create mode 100644 
engine/statusupdatemanager/update.go create mode 100644 engine/verifymanager/db.go create mode 100644 engine/verifymanager/update.go create mode 100644 escalation/policy.go create mode 100644 escalation/policy_test.go create mode 100644 escalation/search.go create mode 100644 escalation/step.go create mode 100644 escalation/step_test.go create mode 100644 escalation/store.go create mode 100644 genericapi/config.go create mode 100644 genericapi/handler.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 grafana/grafana.go create mode 100644 graphql/alert.go create mode 100644 graphql/alertlog.go create mode 100644 graphql/assignment.go create mode 100644 graphql/cache.go create mode 100644 graphql/config.go create mode 100644 graphql/contactmethod.go create mode 100644 graphql/createall.go create mode 100644 graphql/createallutil.go create mode 100644 graphql/deleteall.go create mode 100644 graphql/deleteallutil.go create mode 100644 graphql/escalation.go create mode 100644 graphql/handler.go create mode 100644 graphql/heartbeat.go create mode 100644 graphql/integrationkey.go create mode 100644 graphql/label.go create mode 100644 graphql/legacydb.go create mode 100644 graphql/limit.go create mode 100644 graphql/notificationrule.go create mode 100644 graphql/oncallassignment.go create mode 100644 graphql/rotation.go create mode 100644 graphql/rotationparticipant.go create mode 100644 graphql/schedule.go create mode 100644 graphql/scheduleassignment.go create mode 100644 graphql/schedulerule.go create mode 100644 graphql/scheduleshift.go create mode 100644 graphql/schema.go create mode 100644 graphql/service.go create mode 100644 graphql/user.go create mode 100644 graphql/userfavorite.go create mode 100644 graphql/useroverride.go create mode 100644 graphql/util.go create mode 100644 graphql2/clocktime.go create mode 100644 graphql2/cmtype.go create mode 100644 graphql2/gen.go create mode 100644 graphql2/generated.go create mode 100644 
graphql2/gqlgen.yml create mode 100644 graphql2/graphqlapp/alert.go create mode 100644 graphql2/graphqlapp/app.go create mode 100644 graphql2/graphqlapp/config.go create mode 100644 graphql2/graphqlapp/contactmethod.go create mode 100644 graphql2/graphqlapp/dataloaders.go create mode 100644 graphql2/graphqlapp/escalationpolicy.go create mode 100644 graphql2/graphqlapp/integrationkey.go create mode 100644 graphql2/graphqlapp/label.go create mode 100644 graphql2/graphqlapp/mutation.go create mode 100644 graphql2/graphqlapp/notificationrule.go create mode 100644 graphql2/graphqlapp/oncall.go create mode 100644 graphql2/graphqlapp/playground.go create mode 100644 graphql2/graphqlapp/query.go create mode 100644 graphql2/graphqlapp/rotation.go create mode 100644 graphql2/graphqlapp/schedule.go create mode 100644 graphql2/graphqlapp/schedulerule.go create mode 100644 graphql2/graphqlapp/service.go create mode 100644 graphql2/graphqlapp/slack.go create mode 100644 graphql2/graphqlapp/target.go create mode 100644 graphql2/graphqlapp/timezone.go create mode 100644 graphql2/graphqlapp/tx.go create mode 100644 graphql2/graphqlapp/user.go create mode 100644 graphql2/graphqlapp/useroverride.go create mode 100644 graphql2/isotimestamp.go create mode 100644 graphql2/mapconfig.go create mode 100644 graphql2/models_gen.go create mode 100644 graphql2/schema.graphql create mode 100644 heartbeat/monitor.go create mode 100644 heartbeat/state.go create mode 100644 heartbeat/store.go create mode 100644 integrationkey/integrationkey.go create mode 100644 integrationkey/integrationkey_test.go create mode 100644 integrationkey/store.go create mode 100644 integrationkey/type.go create mode 100644 internal/generatemocks.go create mode 100644 internal/match/assignmentmatchers.go create mode 100644 keyring/keys.go create mode 100644 keyring/store.go create mode 100644 keyring/store_test.go create mode 100644 label/label.go create mode 100644 label/search.go create mode 100644 label/store.go 
create mode 100644 limit/error.go create mode 100644 limit/id.go create mode 100644 limit/limits.go create mode 100644 limit/store.go create mode 100644 lock/global.go create mode 100644 logging/errors.slide create mode 100644 mailgun/mailgun.go create mode 100644 migrate/inline_data_gen.go create mode 100644 migrate/inline_types_gen.go create mode 100644 migrate/migrate.go create mode 100644 migrate/migrations/20170426134008-init.sql create mode 100644 migrate/migrations/20170428154209-users-table.sql create mode 100644 migrate/migrations/20170502172843-user-settings.sql create mode 100644 migrate/migrations/20170503144542-remove-carrier.sql create mode 100644 migrate/migrations/20170503144821-remove-email-verified.sql create mode 100644 migrate/migrations/20170503154907-delay-minutes.sql create mode 100644 migrate/migrations/20170509154250-alerts.sql create mode 100644 migrate/migrations/20170515120511-escalation-policy-actions.sql create mode 100644 migrate/migrations/20170515162554-user-notifications.sql create mode 100644 migrate/migrations/20170518142432-alert-assignments.sql create mode 100644 migrate/migrations/20170530135027-schedule-rotation.sql create mode 100644 migrate/migrations/20170605131920-twilio-sms.sql create mode 100644 migrate/migrations/20170605131942-twilio-voice.sql create mode 100644 migrate/migrations/20170607103917-throttle.sql create mode 100644 migrate/migrations/20170612101232-escalation-tweaks.sql create mode 100644 migrate/migrations/20170613122551-auth-token.sql create mode 100644 migrate/migrations/20170619123628-add-constraints.sql create mode 100644 migrate/migrations/20170619164449-bobby-tables.sql create mode 100644 migrate/migrations/20170620104459-contact-constraints.sql create mode 100644 migrate/migrations/20170621141923-notification-query-fixes.sql create mode 100644 migrate/migrations/20170621170744-add-country-code.sql create mode 100644 migrate/migrations/20170623151348-on-call-alert-distinct.sql create mode 100644 
migrate/migrations/20170623155346-delete-keys-with-service.sql create mode 100644 migrate/migrations/20170629104138-escalation-policy-tweak.sql create mode 100644 migrate/migrations/20170630095448-integration-to-integration-keys.sql create mode 100644 migrate/migrations/20170706102439-esc-zero-index.sql create mode 100644 migrate/migrations/20170707135355-esc-cascade-steps-actions.sql create mode 100644 migrate/migrations/20170707153545-limit-cm-per-interval.sql create mode 100644 migrate/migrations/20170710155447-fix-escalations.sql create mode 100644 migrate/migrations/20170712094434-notification-policy-updates.sql create mode 100644 migrate/migrations/20170713113728-escalation-schema-hardening.sql create mode 100644 migrate/migrations/20170714155817-notification-rule-tweak.sql create mode 100644 migrate/migrations/20170717151241-remove-old-esc-columns.sql create mode 100644 migrate/migrations/20170717151336-remove-old-service-columns.sql create mode 100644 migrate/migrations/20170717151358-remove-old-tables.sql create mode 100644 migrate/migrations/20170717152954-ids-to-uuids.sql create mode 100644 migrate/migrations/20170724162219-fix-alert-escalations.sql create mode 100644 migrate/migrations/20170725105059-rotations-shift-length-check.sql create mode 100644 migrate/migrations/20170725105905-fix-shift-calculation.sql create mode 100644 migrate/migrations/20170726141849-handle-missing-users.sql create mode 100644 migrate/migrations/20170726143800-no-oncall-for-future-rotations.sql create mode 100644 migrate/migrations/20170726155056-twilio-sms-errors.sql create mode 100644 migrate/migrations/20170726155351-twilio-voice-errors.sql create mode 100644 migrate/migrations/20170802114735-alert_logs_enum_update.sql create mode 100644 migrate/migrations/20170802160314-add-timezones.sql create mode 100644 migrate/migrations/20170808110638-user-email-nullable-allowed.sql create mode 100644 migrate/migrations/20170811110036-add-generic-integration-key.sql create mode 
100644 migrate/migrations/20170817102712-atomic-escalation-policies.sql create mode 100644 migrate/migrations/20170818135106-add-gravatar-col-to-user.sql create mode 100644 migrate/migrations/20170825124926-escalation-policy-step-reorder.sql create mode 100644 migrate/migrations/20171024114842-adjust-notification-create-at-check.sql create mode 100644 migrate/migrations/20171027145352-dont-notify-disabled-cms.sql create mode 100644 migrate/migrations/20171030130758-ev3-drop-views.sql create mode 100644 migrate/migrations/20171030130759-ev3-schedule-rules.sql create mode 100644 migrate/migrations/20171030130800-ev3-notification-policy.sql create mode 100644 migrate/migrations/20171030130801-ev3-escalation-policy-state.sql create mode 100644 migrate/migrations/20171030130802-ev3-rotations.sql create mode 100644 migrate/migrations/20171030130804-ev3-assign-schedule-rotations.sql create mode 100644 migrate/migrations/20171030130806-ev3-add-rotation-ep-action.sql create mode 100644 migrate/migrations/20171030130810-ev3-notification-logs.sql create mode 100644 migrate/migrations/20171030130811-ev3-drop-ep-snapshot-trigger.sql create mode 100644 migrate/migrations/20171030130812-ev3-rotation-state.sql create mode 100644 migrate/migrations/20171030130813-ev3-throttle-locks.sql create mode 100644 migrate/migrations/20171030150519-ev3-remove-status-trigger.sql create mode 100644 migrate/migrations/20171126093536-schedule-rule-processing.sql create mode 100644 migrate/migrations/20171201104359-structured-alert-logs.sql create mode 100644 migrate/migrations/20171201104433-add-alert-log-types.sql create mode 100644 migrate/migrations/20171205125227-twilio-egress-sms-tracking.sql create mode 100644 migrate/migrations/20171211101108-twilio-egress-voice-tracking.sql create mode 100644 migrate/migrations/20171213141802-add-alert-source-email.sql create mode 100644 migrate/migrations/20171220113439-add-alert-dedup-keys.sql create mode 100644 
migrate/migrations/20171221134500-limit-configuration.sql create mode 100644 migrate/migrations/20171221138101-notification-rule-limit.sql create mode 100644 migrate/migrations/20171221140906-contact-method-limit.sql create mode 100644 migrate/migrations/20171221142234-ep-step-limit.sql create mode 100644 migrate/migrations/20171221142553-ep-step-action-limit.sql create mode 100644 migrate/migrations/20171221150317-rotation-participant-limit.sql create mode 100644 migrate/migrations/20171221150825-schedule-rule-limit.sql create mode 100644 migrate/migrations/20171221150955-integration-key-limit.sql create mode 100644 migrate/migrations/20171221151358-unacked-alert-limit.sql create mode 100644 migrate/migrations/20171221162356-case-insenstive-name-constraints.sql create mode 100644 migrate/migrations/20180103113251-schedule-target-limit.sql create mode 100644 migrate/migrations/20180104114110-disable-process-alerts-queue.sql create mode 100644 migrate/migrations/20180104122450-wait-alert-queue-finished.sql create mode 100644 migrate/migrations/20180104123517-outgoing-messages.sql create mode 100644 migrate/migrations/20180104124640-ncycle-tick.sql create mode 100644 migrate/migrations/20180104125444-twilio-sms-multiple-callbacks.sql create mode 100644 migrate/migrations/20180109114058-email-integration-key.sql create mode 100644 migrate/migrations/20180110155110-alert-unique-dedup-service.sql create mode 100644 migrate/migrations/20180117110856-status-update-message-type.sql create mode 100644 migrate/migrations/20180117115123-alert-status-updates.sql create mode 100644 migrate/migrations/20180118112019-restrict-cm-to-same-user.sql create mode 100644 migrate/migrations/20180126162030-heartbeat-auth-log-subject-type.sql create mode 100644 migrate/migrations/20180126162093-heartbeats.sql create mode 100644 migrate/migrations/20180126162144-heartbeat-auth-log-data.sql create mode 100644 migrate/migrations/20180130123755-heartbeat-limit-key.sql create mode 100644 
migrate/migrations/20180130123852-heartbeat-limit.sql create mode 100644 migrate/migrations/20180201180221-add-verification-code.sql create mode 100644 migrate/migrations/20180207113632-ep-step-number-consistency.sql create mode 100644 migrate/migrations/20180207124220-rotation-participant-position-consistency.sql create mode 100644 migrate/migrations/20180216104945-alerts-split-summary-details.sql create mode 100644 migrate/migrations/20180228103159-schedule-overrides-limit-key.sql create mode 100644 migrate/migrations/20180228111204-schedule-overrides.sql create mode 100644 migrate/migrations/20180313152132-schedule-on-call-users.sql create mode 100644 migrate/migrations/20180315113303-strict-rotation-state.sql create mode 100644 migrate/migrations/20180320153326-npcycle-indexes.sql create mode 100644 migrate/migrations/20180321143255-ep-step-count.sql create mode 100644 migrate/migrations/20180321145054-strict-ep-state.sql create mode 100644 migrate/migrations/20180326154252-move-rotation-triggers.sql create mode 100644 migrate/migrations/20180330110116-move-ep-triggers.sql create mode 100644 migrate/migrations/20180403113645-fix-rot-part-delete.sql create mode 100644 migrate/migrations/20180417142940-region-processing.sql create mode 100644 migrate/migrations/20180517100033-clear-cycles-on-policy-change.sql create mode 100644 migrate/migrations/20180517135700-policy-reassignment-trigger-fix.sql create mode 100644 migrate/migrations/20180517210000-auth2.sql create mode 100644 migrate/migrations/20180517220000-keyring.sql create mode 100644 migrate/migrations/20180517230000-auth-nonce.sql create mode 100644 migrate/migrations/20180521124533-UserFavorites.sql create mode 100644 migrate/migrations/20180710110438-engine-processing-versions.sql create mode 100644 migrate/migrations/20180720121433-increment-module-versions.sql create mode 100644 migrate/migrations/20180720121533-drop-dedup-trigger.sql create mode 100644 
migrate/migrations/20180720121633-drop-description-col.sql create mode 100644 migrate/migrations/20180720121733-fix-svc-ep-state-trigger.sql create mode 100644 migrate/migrations/20180720121833-create-ep-state-on-alert.sql create mode 100644 migrate/migrations/20180720121933-store-next-escalation-time.sql create mode 100644 migrate/migrations/20180720122033-ep-step-on-call.sql create mode 100644 migrate/migrations/20180720122133-clear-next-esc-on-ack.sql create mode 100644 migrate/migrations/20180720122233-drop-unique-cycles-constraint.sql create mode 100644 migrate/migrations/20180720122333-fix-schedule-index.sql create mode 100644 migrate/migrations/20180720122433-trig-alert-on-force-escalation.sql create mode 100644 migrate/migrations/20180720122533-drop-ep-state-np-trig.sql create mode 100644 migrate/migrations/20180720122633-update-existing-escalations.sql create mode 100644 migrate/migrations/20180728150427-add-provider-msg-id.sql create mode 100644 migrate/migrations/20180803090205-drop-alert-assignments.sql create mode 100644 migrate/migrations/20180803090305-drop-alert-escalation-policy-snapshots.sql create mode 100644 migrate/migrations/20180803090405-drop-notification-logs.sql create mode 100644 migrate/migrations/20180803090505-drop-process-alerts.sql create mode 100644 migrate/migrations/20180803090605-drop-process-rotations.sql create mode 100644 migrate/migrations/20180803090705-drop-process-schedules.sql create mode 100644 migrate/migrations/20180803090805-drop-sent-notifications.sql create mode 100644 migrate/migrations/20180803090905-drop-throttle.sql create mode 100644 migrate/migrations/20180803091005-drop-user-contact-method-locks.sql create mode 100644 migrate/migrations/20180803110851-drop-twilio-egress-sms-status.sql create mode 100644 migrate/migrations/20180803110859-drop-twilio-egress-voice-status.sql create mode 100644 migrate/migrations/20180806092512-incr-message-version.sql create mode 100644 
migrate/migrations/20180806102513-drop-twilio-voice-callbacks.sql create mode 100644 migrate/migrations/20180806102620-drop-user-notification-cycles.sql create mode 100644 migrate/migrations/20180806102708-drop-auth-github-users.sql create mode 100644 migrate/migrations/20180806102923-drop-auth-token-codes.sql create mode 100644 migrate/migrations/20180816094955-switchover-state.sql create mode 100644 migrate/migrations/20180816095055-add-row-ids.sql create mode 100644 migrate/migrations/20180816095155-change-log.sql create mode 100644 migrate/migrations/20180816164203-drop-end-time-check.sql create mode 100644 migrate/migrations/20180821150330-deferable-status-cm.sql create mode 100644 migrate/migrations/20180822153707-defer-rotation-state.sql create mode 100644 migrate/migrations/20180822153914-defer-ep-state.sql create mode 100644 migrate/migrations/20180831132457-user-last-alert-log-indexes.sql create mode 100644 migrate/migrations/20180831132707-alerts-service-index.sql create mode 100644 migrate/migrations/20180831132743-np-cycle-alert-index.sql create mode 100644 migrate/migrations/20180831132927-alert-logs-index.sql create mode 100644 migrate/migrations/20180831143308-outgoing-messages-index.sql create mode 100644 migrate/migrations/20180907111203-schedule-rule-endtime-fix.sql create mode 100644 migrate/migrations/20180918102226-add-service-label.sql create mode 100644 migrate/migrations/20181004032148-labels-switchover-trigger.sql create mode 100644 migrate/migrations/20181004145558-fix-deleting-participants.sql create mode 100644 migrate/migrations/20181008111401-twilio-sms-short-reply.sql create mode 100644 migrate/migrations/20181018131939-fix-rotation-deletions.sql create mode 100644 migrate/migrations/20181107133329-notification-channels.sql create mode 100644 migrate/migrations/20181107155035-nc-id-to-ep-action.sql create mode 100644 migrate/migrations/20181107155229-om-notification-channel.sql create mode 100644 
migrate/migrations/20190117130422-notif-chan-engine-versions.sql create mode 100644 migrate/migrations/20190129110250-add-cleanup-module.sql create mode 100644 migrate/migrations/20190201104727-alert-logs-channel.sql create mode 100644 migrate/migrations/20190201142137-drop-sub-constraint.sql create mode 100644 migrate/migrations/20190225112925-config-table.sql create mode 100644 migrate/migrations/20190312153204-slack-api-change.sql create mode 100644 migrate/migrations/20190313125552-slack-user-link.sql create mode 100644 migrate/migrations/20190404105850-nc-no-meta.sql create mode 100644 migrate/migrations/20190517144224-trigger-config-sync.sql create mode 100644 notification/alert.go create mode 100644 notification/alertstatus.go create mode 100644 notification/dest.go create mode 100644 notification/desttype_string.go create mode 100644 notification/manager.go create mode 100644 notification/message.go create mode 100644 notification/messagestatus.go create mode 100644 notification/messagetype_string.go create mode 100644 notification/namedsender.go create mode 100644 notification/notifier.go create mode 100644 notification/result_string.go create mode 100644 notification/slack/cache.go create mode 100644 notification/slack/channel.go create mode 100644 notification/slack/config.go create mode 100644 notification/slack/throttle.go create mode 100644 notification/store.go create mode 100644 notification/stubsender.go create mode 100644 notification/testnotification.go create mode 100644 notification/twilio/alertsms.go create mode 100644 notification/twilio/alertsms_test.go create mode 100644 notification/twilio/call.go create mode 100644 notification/twilio/client.go create mode 100644 notification/twilio/config.go create mode 100644 notification/twilio/dbban.go create mode 100644 notification/twilio/dbsms.go create mode 100644 notification/twilio/exception.go create mode 100644 notification/twilio/headerhack.go create mode 100644 notification/twilio/message.go 
create mode 100644 notification/twilio/signature.go create mode 100644 notification/twilio/signature_test.go create mode 100644 notification/twilio/sms.go create mode 100644 notification/twilio/validation.go create mode 100644 notification/twilio/voice.go create mode 100644 notification/verification.go create mode 100644 notificationchannel/channel.go create mode 100644 notificationchannel/store.go create mode 100644 notificationchannel/type.go create mode 100644 oncall/pool.go create mode 100644 oncall/state.go create mode 100644 oncall/state_test.go create mode 100644 oncall/store.go create mode 100644 override/override.go create mode 100644 override/search.go create mode 100644 override/store.go create mode 100644 permission/checker.go create mode 100644 permission/context.go create mode 100644 permission/context_test.go create mode 100644 permission/contextkey.go create mode 100644 permission/error.go create mode 100644 permission/permission.go create mode 100644 permission/permission_test.go create mode 100644 permission/role.go create mode 100644 permission/source.go create mode 100644 permission/sourcetype_string.go create mode 100644 permission/store.go create mode 100644 remotemonitor/config.go create mode 100644 remotemonitor/doc.go create mode 100644 remotemonitor/instance.go create mode 100644 remotemonitor/monitor.go create mode 100644 remotemonitor/requestid.go create mode 100644 remotemonitor/sms.go create mode 100644 retry/do.go create mode 100644 retry/temporary.go create mode 100644 schedule/rotation/participant.go create mode 100644 schedule/rotation/rotation.go create mode 100644 schedule/rotation/rotation_test.go create mode 100644 schedule/rotation/search.go create mode 100644 schedule/rotation/state.go create mode 100644 schedule/rotation/store.go create mode 100644 schedule/rotation/type.go create mode 100644 schedule/rule/clock.go create mode 100644 schedule/rule/rule.go create mode 100644 schedule/rule/rule_test.go create mode 100644 
schedule/rule/store.go create mode 100644 schedule/rule/weekdayfilter.go create mode 100644 schedule/rule/weekdayfilter_test.go create mode 100644 schedule/schedule.go create mode 100644 schedule/schedule_test.go create mode 100644 schedule/search.go create mode 100644 schedule/shiftcalc/overrides.go create mode 100644 schedule/shiftcalc/overrides_test.go create mode 100644 schedule/shiftcalc/shiftcalc.go create mode 100644 schedule/shiftcalc/shiftcalc_test.go create mode 100644 schedule/store.go create mode 100644 search/config.go create mode 100644 search/cursor.go create mode 100644 search/escape.go create mode 100644 search/render.go create mode 100644 service/legacysearch.go create mode 100644 service/search.go create mode 100644 service/service.go create mode 100644 service/service_test.go create mode 100644 service/store.go create mode 100644 smoketest/README.md create mode 100644 smoketest/addrules_test.go create mode 100644 smoketest/dedupnotifications_test.go create mode 100644 smoketest/deleteescalationpolicy_test.go create mode 100644 smoketest/deleterotation_test.go create mode 100644 smoketest/escalation_test.go create mode 100644 smoketest/escalationgap_test.go create mode 100644 smoketest/escalationnotification_test.go create mode 100644 smoketest/genericapi_test.go create mode 100644 smoketest/genericapiclose_test.go create mode 100644 smoketest/genericapidedup_test.go create mode 100644 smoketest/grafana_test.go create mode 100644 smoketest/graphql2users_test.go create mode 100644 smoketest/graphqlalert_test.go create mode 100644 smoketest/graphqlcreateschedule_test.go create mode 100644 smoketest/graphqlcreatescheduledefaultrotation_test.go create mode 100644 smoketest/graphqlmultiplealerts_test.go create mode 100644 smoketest/graphqloncall_test.go create mode 100644 smoketest/graphqloncallassignments_test.go create mode 100644 smoketest/graphqlservicelabels_test.go create mode 100644 smoketest/graphqlupdaterotation_test.go create mode 100644 
smoketest/graphqluserfavorites_test.go create mode 100644 smoketest/graphqlusers_test.go create mode 100644 smoketest/harness/backend.go create mode 100644 smoketest/harness/datagen.go create mode 100644 smoketest/harness/graphql.go create mode 100644 smoketest/harness/harness.go create mode 100644 smoketest/harness/harness_notunix.go create mode 100644 smoketest/harness/harness_unix.go create mode 100644 smoketest/harness/slack.go create mode 100644 smoketest/harness/twilio.go create mode 100644 smoketest/heartbeat_test.go create mode 100644 smoketest/inprogress_test.go create mode 100644 smoketest/listalerts_test.go create mode 100644 smoketest/manualescalationnotification_test.go create mode 100644 smoketest/migrations_test.go create mode 100644 smoketest/missinguser_test.go create mode 100644 smoketest/multistepnotification_test.go create mode 100644 smoketest/multiuser_test.go create mode 100644 smoketest/policyreassignment_test.go create mode 100644 smoketest/postcyclerules_test.go create mode 100644 smoketest/prioritization_test.go create mode 100644 smoketest/rotationdaily_test.go create mode 100644 smoketest/rotationdst_test.go create mode 100644 smoketest/rotationgap_test.go create mode 100644 smoketest/rotationhourly_test.go create mode 100644 smoketest/rotationweekly_test.go create mode 100644 smoketest/rotationwrap_test.go create mode 100644 smoketest/schedulerule_test.go create mode 100644 smoketest/simplenotification_india_test.go create mode 100644 smoketest/simplenotification_test.go create mode 100644 smoketest/slackaddtoepstep_test.go create mode 100644 smoketest/slackchannels_test.go create mode 100644 smoketest/slacknotification_test.go create mode 100644 smoketest/statusupdates_test.go create mode 100644 smoketest/systemlimits_test.go create mode 100644 smoketest/twilioenablebysms_test.go create mode 100644 smoketest/twilioenablebyvoice_test.go create mode 100644 smoketest/twiliosmsack_test.go create mode 100644 
smoketest/twiliosmsclose_test.go create mode 100644 smoketest/twiliosmsfailure_test.go create mode 100644 smoketest/twiliosmsreplycode_test.go create mode 100644 smoketest/twiliosmsreplylast_test.go create mode 100644 smoketest/twiliosmsstop_test.go create mode 100644 smoketest/twiliosmstrailingspace_test.go create mode 100644 smoketest/twiliosmsverification_test.go create mode 100644 smoketest/twiliotestsms_test.go create mode 100644 smoketest/twiliotestvoice_test.go create mode 100644 smoketest/twiliovoiceack_test.go create mode 100644 smoketest/twiliovoiceclose_test.go create mode 100644 smoketest/twiliovoiceemptymessage_test.go create mode 100644 smoketest/twiliovoicefailure_test.go create mode 100644 smoketest/twiliovoicestop_test.go create mode 100644 smoketest/twiliovoiceverification_test.go create mode 100755 smoketest/upload.sh create mode 100644 smoketest/usernotificationcycles_test.go create mode 100644 sqltrace/attributes.go create mode 100644 sqltrace/conn.go create mode 100644 sqltrace/connector.go create mode 100644 sqltrace/driver.go create mode 100644 sqltrace/rows.go create mode 100644 sqltrace/simpleconnector.go create mode 100644 sqltrace/stmt.go create mode 100644 sqltrace/tx.go create mode 100644 switchover/dbstate.go create mode 100644 switchover/dbsync/ctxshell.go create mode 100644 switchover/dbsync/diffsync.go create mode 100644 switchover/dbsync/initsync.go create mode 100644 switchover/dbsync/listen.go create mode 100644 switchover/dbsync/sequences.go create mode 100644 switchover/dbsync/shell.go create mode 100644 switchover/dbsync/status.go create mode 100644 switchover/dbsync/sync.go create mode 100644 switchover/dbsync/table.go create mode 100644 switchover/deadlineconfig.go create mode 100644 switchover/handler.go create mode 100644 switchover/mainloop.go create mode 100644 switchover/notify.go create mode 100644 switchover/state.go create mode 100644 switchover/status.go create mode 100644 timezone/search.go create mode 100644 
timezone/store.go create mode 100644 user/authsubject.go create mode 100644 user/contactmethod/contactmethod.go create mode 100644 user/contactmethod/contactmethod_test.go create mode 100644 user/contactmethod/store.go create mode 100644 user/contactmethod/type.go create mode 100644 user/favorite/store.go create mode 100644 user/notificationrule/notificationrule.go create mode 100644 user/notificationrule/notificationrule_test.go create mode 100644 user/notificationrule/store.go create mode 100644 user/search.go create mode 100644 user/store.go create mode 100644 user/user.go create mode 100644 user/user_test.go create mode 100644 util/alignedticker.go create mode 100644 util/contextcache.go create mode 100644 util/contextcache_test.go create mode 100644 util/contextroundtripper.go create mode 100644 util/contextwaitgroup.go create mode 100644 util/errutil/httperror.go create mode 100644 util/errutil/maperror.go create mode 100644 util/errutil/scruberror.go create mode 100644 util/loadlocation.go create mode 100644 util/log/fields.go create mode 100644 util/log/fields_test.go create mode 100644 util/log/log.go create mode 100644 util/log/sqlhighlight.go create mode 100644 util/log/terminalformatter.go create mode 100644 util/sqlprepare.go create mode 100644 util/util.go create mode 100644 util/util_test.go create mode 100644 validation/fieldvalidationerror.go create mode 100644 validation/validate/email.go create mode 100644 validation/validate/idname.go create mode 100644 validation/validate/idname_test.go create mode 100644 validation/validate/labelkey.go create mode 100644 validation/validate/labelkey_test.go create mode 100644 validation/validate/labelvalue.go create mode 100644 validation/validate/labelvalue_test.go create mode 100644 validation/validate/many.go create mode 100644 validation/validate/many_test.go create mode 100644 validation/validate/name.go create mode 100644 validation/validate/name_test.go create mode 100644 validation/validate/oneof.go 
create mode 100644 validation/validate/oneof_test.go create mode 100644 validation/validate/phone.go create mode 100644 validation/validate/phone_test.go create mode 100644 validation/validate/range.go create mode 100644 validation/validate/subjectid.go create mode 100644 validation/validate/text.go create mode 100644 validation/validate/url.go create mode 100644 validation/validate/username.go create mode 100644 validation/validate/uuid.go create mode 100644 web/.gitignore create mode 100644 web/bundle.go create mode 100644 web/handler.go create mode 100644 web/inline_types_gen.go create mode 100644 web/src/.editorconfig create mode 100644 web/src/.eslintignore create mode 100644 web/src/.eslintrc.js create mode 100644 web/src/.gitignore create mode 100644 web/src/.gqlconfig create mode 100644 web/src/.prettierignore create mode 100644 web/src/.stylelintrc create mode 100644 web/src/app/actions/alerts.js create mode 100644 web/src/app/actions/auth.js create mode 100644 web/src/app/actions/index.js create mode 100644 web/src/app/actions/main.js create mode 100644 web/src/app/actions/service.js create mode 100644 web/src/app/admin/AdminConfig.js create mode 100644 web/src/app/admin/AdminConfigSection.js create mode 100644 web/src/app/admin/AdminConfirmDialog.js create mode 100644 web/src/app/admin/AdminFieldComponents.js create mode 100644 web/src/app/admin/AdminRouter.js create mode 100644 web/src/app/admin/index.js create mode 100644 web/src/app/alerts/AlertRouter.js create mode 100644 web/src/app/alerts/components/AlertDetails.js create mode 100644 web/src/app/alerts/components/AlertForm.js create mode 100644 web/src/app/alerts/components/AlertsList.js create mode 100644 web/src/app/alerts/components/AlertsListControls.js create mode 100644 web/src/app/alerts/components/AlertsListDataWrapper.js create mode 100644 web/src/app/alerts/components/AlertsListFilter.js create mode 100644 web/src/app/alerts/components/CheckedAlertsFormControl.js create mode 100644 
web/src/app/alerts/components/CreateAlertFab.js create mode 100644 web/src/app/alerts/components/UpdateAlertsSnackbar.js create mode 100644 web/src/app/alerts/pages/AlertDetailPage.js create mode 100644 web/src/app/alerts/pages/AlertsIndexPage.js create mode 100644 web/src/app/alerts/queries/AlertsListQuery.js create mode 100644 web/src/app/apollo.js create mode 100644 web/src/app/config/index.js create mode 100644 web/src/app/contact-methods/components/ContactMethodForm.js create mode 100644 web/src/app/contact-methods/components/VerificationForm.js create mode 100644 web/src/app/details/DetailsPage.js create mode 100644 web/src/app/dialogs/FormDialog.js create mode 100644 web/src/app/dialogs/components/ApolloFormDialog.js create mode 100644 web/src/app/dialogs/components/ConfirmationDialog.js create mode 100644 web/src/app/dialogs/components/DialogContentError.js create mode 100644 web/src/app/dialogs/components/DialogTitleWrapper.js create mode 100644 web/src/app/dialogs/components/DropDownMenu.js create mode 100644 web/src/app/dialogs/components/FormDialog.js create mode 100644 web/src/app/documentation/IntegrationKeys.md create mode 100644 web/src/app/documentation/components/IntegrationKeyAPI.js create mode 100644 web/src/app/error-pages/Errors.js create mode 100644 web/src/app/error-pages/index.js create mode 100644 web/src/app/escalation-policies/PolicyCreateDialog.js create mode 100644 web/src/app/escalation-policies/PolicyDeleteDialog.js create mode 100644 web/src/app/escalation-policies/PolicyDetails.js create mode 100644 web/src/app/escalation-policies/PolicyEditDialog.js create mode 100644 web/src/app/escalation-policies/PolicyForm.js create mode 100644 web/src/app/escalation-policies/PolicyRouter.js create mode 100644 web/src/app/escalation-policies/PolicyServicesCard.js create mode 100644 web/src/app/escalation-policies/PolicyServicesQuery.js create mode 100644 web/src/app/escalation-policies/PolicyStep.js create mode 100644 
web/src/app/escalation-policies/PolicyStepCreateDialog.js create mode 100644 web/src/app/escalation-policies/PolicyStepDeleteDialog.js create mode 100644 web/src/app/escalation-policies/PolicyStepEditDialog.js create mode 100644 web/src/app/escalation-policies/PolicyStepForm.js create mode 100644 web/src/app/escalation-policies/PolicyStepsCard.js create mode 100644 web/src/app/escalation-policies/PolicyStepsQuery.js create mode 100644 web/src/app/forms/Form.js create mode 100644 web/src/app/forms/FormContainer.js create mode 100644 web/src/app/forms/FormField.js create mode 100644 web/src/app/forms/README.md create mode 100644 web/src/app/forms/context.js create mode 100644 web/src/app/forms/index.js create mode 100644 web/src/app/history.js create mode 100644 web/src/app/icons/components/Icons.js create mode 100644 web/src/app/icons/index.js create mode 100644 web/src/app/index.js create mode 100644 web/src/app/links/RotationLink.js create mode 100644 web/src/app/links/ScheduleLink.js create mode 100644 web/src/app/links/ServiceLink.js create mode 100644 web/src/app/links/UserLink.js create mode 100644 web/src/app/links/index.js create mode 100644 web/src/app/lists/CreateFAB.js create mode 100644 web/src/app/lists/FlatList.js create mode 100644 web/src/app/lists/PaginatedList.js create mode 100644 web/src/app/lists/QueryList.js create mode 100644 web/src/app/lists/SimpleListPage.js create mode 100644 web/src/app/lists/components/BaseActionsMenu.js create mode 100644 web/src/app/lists/index.js create mode 100644 web/src/app/loading/components/LoadingButton.js create mode 100644 web/src/app/loading/components/Spinner.js create mode 100644 web/src/app/main/ErrorBoundary.js create mode 100644 web/src/app/main/MobileSideBar.js create mode 100644 web/src/app/main/NewApp.js create mode 100644 web/src/app/main/URLErrorDialog.js create mode 100644 web/src/app/main/WideSideBar.js create mode 100644 web/src/app/main/components/Login.js create mode 100644 
web/src/app/main/components/NewUserSetup.js create mode 100644 web/src/app/main/components/SideBarDrawerList.js create mode 100644 web/src/app/main/components/ToolbarAction.js create mode 100644 web/src/app/main/components/ToolbarTitle.js create mode 100644 web/src/app/main/routes.js create mode 100644 web/src/app/mui-pickers.js create mode 100644 web/src/app/mui.js create mode 100644 web/src/app/notification-rules/components/CreateNotificationRuleForm.js create mode 100644 web/src/app/public/favicon-128.png create mode 100644 web/src/app/public/favicon-16.png create mode 100644 web/src/app/public/favicon-192.png create mode 100644 web/src/app/public/favicon-32.png create mode 100644 web/src/app/public/favicon-64.png create mode 100644 web/src/app/public/favicon.ico create mode 100644 web/src/app/public/goalert-alt-logo-scaled.png create mode 100644 web/src/app/public/goalert-alt-logo.png create mode 100644 web/src/app/public/goalert-logo-scaled.png create mode 100644 web/src/app/public/goalert-logo-scaled.webp create mode 100644 web/src/app/public/goalert-logo-scaled@1.5.webp create mode 100644 web/src/app/public/goalert-logo-scaled@2.png create mode 100644 web/src/app/public/goalert-logo-scaled@2.webp create mode 100644 web/src/app/public/goalert-logo.png create mode 100644 web/src/app/public/slack.svg create mode 100644 web/src/app/public/slack_monochrome_black.svg create mode 100644 web/src/app/reducers/alerts.js create mode 100644 web/src/app/reducers/auth.js create mode 100644 web/src/app/reducers/index.js create mode 100644 web/src/app/reducers/main.js create mode 100644 web/src/app/reduxStore.js create mode 100644 web/src/app/rhl.js create mode 100644 web/src/app/rotations/RotationAddUserDialog.js create mode 100644 web/src/app/rotations/RotationCreateDialog.js create mode 100644 web/src/app/rotations/RotationDeleteDialog.js create mode 100644 web/src/app/rotations/RotationDetails.js create mode 100644 web/src/app/rotations/RotationEditDialog.js create mode 
100644 web/src/app/rotations/RotationForm.js create mode 100644 web/src/app/rotations/RotationRouter.js create mode 100644 web/src/app/rotations/RotationSetActiveDialog.js create mode 100644 web/src/app/rotations/RotationUserDeleteDialog.js create mode 100644 web/src/app/rotations/RotationUserList.js create mode 100644 web/src/app/rotations/RotationUserListItem.js create mode 100644 web/src/app/rotations/UserForm.js create mode 100644 web/src/app/rotations/util.js create mode 100644 web/src/app/rotations/util.test.js create mode 100644 web/src/app/schedules/CalendarEventWrapper.js create mode 100644 web/src/app/schedules/CalendarToolbar.js create mode 100644 web/src/app/schedules/ScheduleAssignedToList.js create mode 100644 web/src/app/schedules/ScheduleCalendar.js create mode 100644 web/src/app/schedules/ScheduleCalendarQuery.js create mode 100644 web/src/app/schedules/ScheduleCreateDialog.js create mode 100644 web/src/app/schedules/ScheduleDeleteDialog.js create mode 100644 web/src/app/schedules/ScheduleDetails.js create mode 100644 web/src/app/schedules/ScheduleEditDialog.js create mode 100644 web/src/app/schedules/ScheduleForm.js create mode 100644 web/src/app/schedules/ScheduleNewOverrideFAB.js create mode 100644 web/src/app/schedules/ScheduleOverrideCreateDialog.js create mode 100644 web/src/app/schedules/ScheduleOverrideDeleteDialog.js create mode 100644 web/src/app/schedules/ScheduleOverrideEditDialog.js create mode 100644 web/src/app/schedules/ScheduleOverrideForm.js create mode 100644 web/src/app/schedules/ScheduleOverrideList.js create mode 100644 web/src/app/schedules/ScheduleRouter.js create mode 100644 web/src/app/schedules/ScheduleRuleCreateDialog.js create mode 100644 web/src/app/schedules/ScheduleRuleDeleteDialog.js create mode 100644 web/src/app/schedules/ScheduleRuleEditDialog.js create mode 100644 web/src/app/schedules/ScheduleRuleForm.js create mode 100644 web/src/app/schedules/ScheduleRuleList.js create mode 100644 
web/src/app/schedules/ScheduleShiftList.js create mode 100644 web/src/app/schedules/ScheduleTZFilter.js create mode 100644 web/src/app/schedules/util.js create mode 100644 web/src/app/schedules/util.test.js create mode 100644 web/src/app/selection/EscalationPolicySelect.js create mode 100644 web/src/app/selection/LabelKeySelect.js create mode 100644 web/src/app/selection/MaterialSelect.js create mode 100644 web/src/app/selection/MaterialSelectComponents.js create mode 100644 web/src/app/selection/QuerySelect.js create mode 100644 web/src/app/selection/RotationSelect.js create mode 100644 web/src/app/selection/ScheduleSelect.js create mode 100644 web/src/app/selection/ServiceSelect.js create mode 100644 web/src/app/selection/SlackChannelSelect.js create mode 100644 web/src/app/selection/TimeZoneSelect.js create mode 100644 web/src/app/selection/UserSelect.js create mode 100644 web/src/app/selection/index.js create mode 100644 web/src/app/selectors/index.js create mode 100644 web/src/app/selectors/url.js create mode 100644 web/src/app/selectors/url.test.js create mode 100644 web/src/app/services/IntegrationKeyCreateDialog.js create mode 100644 web/src/app/services/IntegrationKeyDeleteDialog.js create mode 100644 web/src/app/services/IntegrationKeyForm.js create mode 100644 web/src/app/services/IntegrationKeyList.js create mode 100644 web/src/app/services/ServiceCreateDialog.js create mode 100644 web/src/app/services/ServiceDeleteDialog.js create mode 100644 web/src/app/services/ServiceDetails.js create mode 100644 web/src/app/services/ServiceEditDialog.js create mode 100644 web/src/app/services/ServiceForm.js create mode 100644 web/src/app/services/ServiceLabelCreateDialog.js create mode 100644 web/src/app/services/ServiceLabelDeleteDialog.js create mode 100644 web/src/app/services/ServiceLabelEditDialog.js create mode 100644 web/src/app/services/ServiceLabelForm.js create mode 100644 web/src/app/services/ServiceLabelList.js create mode 100644 
web/src/app/services/ServiceOnCallDisplay.js create mode 100644 web/src/app/services/ServiceRouter.js create mode 100644 web/src/app/services/components/OnCallForService.js create mode 100644 web/src/app/services/components/ServiceAlerts.js create mode 100644 web/src/app/services/components/SetFavoriteButton.js create mode 100644 web/src/app/styles/base/elements.scss create mode 100644 web/src/app/styles/base/theme.scss create mode 100644 web/src/app/styles/base/variables.scss create mode 100644 web/src/app/styles/index.js create mode 100644 web/src/app/styles/materialStyles.js create mode 100644 web/src/app/templates/index.html create mode 100644 web/src/app/users/UserContactMethodCreateDialog.js create mode 100644 web/src/app/users/UserContactMethodDeleteDialog.js create mode 100644 web/src/app/users/UserContactMethodEditDialog.js create mode 100644 web/src/app/users/UserContactMethodForm.js create mode 100644 web/src/app/users/UserContactMethodList.js create mode 100644 web/src/app/users/UserContactMethodSelect.js create mode 100644 web/src/app/users/UserDetails.js create mode 100644 web/src/app/users/UserNotificationRuleCreateDialog.js create mode 100644 web/src/app/users/UserNotificationRuleDeleteDialog.js create mode 100644 web/src/app/users/UserNotificationRuleForm.js create mode 100644 web/src/app/users/UserNotificationRuleList.js create mode 100644 web/src/app/users/UserOnCallAssignmentList.js create mode 100644 web/src/app/users/UserRouter.js create mode 100644 web/src/app/users/UserStatusUpdatePreference.js create mode 100644 web/src/app/users/util.js create mode 100644 web/src/app/users/util.test.js create mode 100644 web/src/app/util/Chips.js create mode 100644 web/src/app/util/CountDown.js create mode 100644 web/src/app/util/Diff.js create mode 100644 web/src/app/util/FilterContainer.js create mode 100644 web/src/app/util/GoogleAnalytics.js create mode 100644 web/src/app/util/Markdown.js create mode 100644 web/src/app/util/MountWatcher.js create mode 
100644 web/src/app/util/Options.js create mode 100644 web/src/app/util/OtherActions.js create mode 100644 web/src/app/util/OtherActionsDesktop.js create mode 100644 web/src/app/util/OtherActionsMobile.js create mode 100644 web/src/app/util/PageActions.js create mode 100644 web/src/app/util/Query.js create mode 100644 web/src/app/util/RequireConfig.js create mode 100644 web/src/app/util/Search.js create mode 100644 web/src/app/util/SpeedDial.js create mode 100644 web/src/app/util/Transitions.js create mode 100644 web/src/app/util/avatar/BaseAvatar.js create mode 100644 web/src/app/util/avatar/index.js create mode 100644 web/src/app/util/avatar/types.js create mode 100644 web/src/app/util/copyToClipboard.js create mode 100644 web/src/app/util/debug.js create mode 100644 web/src/app/util/errutil.js create mode 100644 web/src/app/util/gracefulUnmount.js create mode 100644 web/src/app/util/graphql.js create mode 100644 web/src/app/util/graphql.test.js create mode 100644 web/src/app/util/joinURL.js create mode 100644 web/src/app/util/joinURL.test.js create mode 100644 web/src/app/util/on-demand.js create mode 100644 web/src/app/util/poll_intervals.js create mode 100644 web/src/app/util/propTypes.js create mode 100644 web/src/app/util/query_param.js create mode 100644 web/src/app/util/safeURL.js create mode 100644 web/src/app/util/safeURL.test.js create mode 100644 web/src/app/util/statusStyles.js create mode 100644 web/src/app/util/timeFormat.js create mode 100644 web/src/app/util/toTitleCase.js create mode 100644 web/src/app/wizard/WizardForm.js create mode 100644 web/src/app/wizard/WizardRouter.js create mode 100644 web/src/app/wizard/WizardScheduleForm.js create mode 100644 web/src/app/wizard/propTypes.js create mode 100644 web/src/app/wizard/util.js create mode 100644 web/src/app/wizard/util.test.js create mode 100644 web/src/app/wizard/utilTestData.js create mode 100644 web/src/babel.config.js create mode 100644 web/src/codemods/README.md create mode 100644 
web/src/codemods/jsimports.js create mode 100644 web/src/codemods/material-core.js create mode 100644 web/src/cypress.json create mode 100644 web/src/cypress/README.md create mode 100644 web/src/cypress/fixtures/example.json create mode 100644 web/src/cypress/fixtures/profile.json create mode 100644 web/src/cypress/fixtures/profileAdmin.json create mode 100644 web/src/cypress/fixtures/users.json create mode 100644 web/src/cypress/integration/admin.ts create mode 100644 web/src/cypress/integration/alerts.ts create mode 100644 web/src/cypress/integration/auth.ts create mode 100644 web/src/cypress/integration/escalationPolicies.ts create mode 100644 web/src/cypress/integration/escalationPolicySteps.ts create mode 100644 web/src/cypress/integration/markdown.ts create mode 100644 web/src/cypress/integration/pagination.ts create mode 100644 web/src/cypress/integration/profile.ts create mode 100644 web/src/cypress/integration/rotations.ts create mode 100644 web/src/cypress/integration/scheduleCalendar.ts create mode 100644 web/src/cypress/integration/schedules.ts create mode 100644 web/src/cypress/integration/services.ts create mode 100644 web/src/cypress/integration/sidebar.ts create mode 100644 web/src/cypress/integration/wizard.ts create mode 100644 web/src/cypress/plugins/index.js create mode 100644 web/src/cypress/support/alert.ts create mode 100644 web/src/cypress/support/commands.js create mode 100644 web/src/cypress/support/config.ts create mode 100644 web/src/cypress/support/ep.ts create mode 100644 web/src/cypress/support/fail-fast.ts create mode 100644 web/src/cypress/support/graphql.ts create mode 100644 web/src/cypress/support/index.ts create mode 100644 web/src/cypress/support/login.ts create mode 100644 web/src/cypress/support/menu.ts create mode 100644 web/src/cypress/support/navitage-to-and-from.ts create mode 100644 web/src/cypress/support/page-action.ts create mode 100644 web/src/cypress/support/page-fab.ts create mode 100644 
web/src/cypress/support/page-nav.ts create mode 100644 web/src/cypress/support/page-search.ts create mode 100644 web/src/cypress/support/profile.ts create mode 100644 web/src/cypress/support/rotation.ts create mode 100644 web/src/cypress/support/schedule.ts create mode 100644 web/src/cypress/support/select-by-label.ts create mode 100644 web/src/cypress/support/service.ts create mode 100644 web/src/cypress/support/util.ts create mode 100644 web/src/cypress/tsconfig.json create mode 100644 web/src/graphql.config.json create mode 100644 web/src/jsconfig.json create mode 100644 web/src/package.json create mode 100644 web/src/postcss.config.js create mode 100644 web/src/prettier.config.js create mode 100644 web/src/scripts/insert-users.js create mode 100755 web/src/scripts/smoketest-prep.sh create mode 100644 web/src/webpack.config.js create mode 100644 web/src/webpack.dll.config.js create mode 100644 web/src/webpack.prod.config.js create mode 100644 web/src/yarn.lock diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..57ee89412f --- /dev/null +++ b/.gitignore @@ -0,0 +1,57 @@ +*.log +/setup.cfg +/setup.pid +/config.json.bak + +# Created by .ignore support plugin (hsz.mobi) +### Go template +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.out +*.pid +.env +.ready +/run.sh +/config.json* + +# Folders +_obj +_test +temp +bin + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# IDE +.idea +*.iml +*.ipr +.vscode +debug + +# glide +vendor +vendor.orig + +# MacOS +.DS_Store + +ngrok.log + +smoketest_db_dump diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..44434ce880 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, 
we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at +[TTS-OpenSource-Office@target.com](mailto:TTS-OpenSource-Office@target.com). All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..11e5521008 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,39 @@ +# Contributing to GoAlert + +We welcome feature requests, bug reports and contributions for code and documentation. + +## Reporting Issues + +Reporting bugs can be done in the GitHub [issue tracker](https://github.com/target/goalert/issues). Please search for a possible pre-existing issue first to help prevent duplicates. + +Please include the version (`goalert version`) with new bug reports. 
+ +## Code Contribution + +GoAlert is already used in production environments, so any new changes/features/functionality must (where possible): + +- Not alter existing behavior without an explicit config change +- Co-exist with older versions without disruption +- Must have a safe way to disable/roll-back + +It should always be safe to roll out a new version of GoAlert into an existing environment/deployment without downtime. + +As an example, things like DB changes/migrations should preserve behavior across revisions. + +## Pull Requests + +Patches are welcome, but we ask that any significant change start as an [issue](https://github.com/target/goalert/issues/new) in the tracker, prefereably before work is started. + +Be sure to run `make check` before opening a PR to catch common errors. + +### UI Change Guidelines + +- Complex logic should be broken out with corresponding unit tests (we use [Jest](https://jestjs.io/docs/en/using-matchers)) into the same directory. For example: [util.js](./web/src/app/rotations/util.js) and [util.test.js](./web/src/app/rotations/util.test.js). +- New functionality should have an integration test (we use [Cypress](https://docs.cypress.io/guides/getting-started/writing-your-first-test.html#Write-a-simple-test) for these) testing the happy-path at a minimum. Examples [here](./web/src/cypress/integration/sidebar.ts), and [more information here](./web/src/cypress/README.md). +- React components should follow React idioms, using common prop names, and having prop-types defined. + +### Backend Change Guidelines + +- Use unit tests as a tool to validate complex logic +- New functionality should have a behavioral smoketest at a minimum. For [example](./smoketest/simplenotification_test.go). Documentation on our smoketest framework can be found [here](./smoketest/README.md). +- New Go code should pass `golint`, exported functions/methods should be commented, etc.. 
diff --git a/CONTRIBUTORS b/CONTRIBUTORS new file mode 100644 index 0000000000..c1ef30c9eb --- /dev/null +++ b/CONTRIBUTORS @@ -0,0 +1,21 @@ +# This is the official list of people who have contributed code to +# the GoAlert repository. +# +# Names should be added to this file like so: +# Individual's name +# Individual's name +# +# Please keep the list sorted. + +Adam Westman +Andrew Deck +Andrew From +Arundhati Rao +Dan Maas +Jared Anson +Kevin Marquardsen +Michael Black +Mitch Cimenski +Nathaniel Caza +Nathaniel Cook +Nicholas Cappo diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000000..4c5a6cb4cb --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,13 @@ +Copyright (c) 2019 Target Brands, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..55c12faa0e --- /dev/null +++ b/Makefile @@ -0,0 +1,170 @@ +.PHONY: stop start build-docker lint tools regendb resetdb +.PHONY: smoketest generate check all test test-long install install-race +.PHONY: cy-wide cy-mobile cy-wide-prod cy-mobile-prod cypress postgres +.PHONY: config.json.bak jest new-migration +.SUFFIXES: + +GOFILES = $(shell find . 
-path ./web/src -prune -o -path ./vendor -prune -o -path ./.git -prune -o -type f -name "*.go" -print | grep -v web/inline_data_gen.go) go.sum +INLINER = devtools/inliner/*.go +CFGPARAMS = devtools/configparams/*.go +DB_URL = postgres://goalert@localhost:5432/goalert?sslmode=disable + +LOG_DIR= +GOPATH=$(shell go env GOPATH) +BIN_DIR=bin + +GIT_VERSION=$(shell git describe --tags --dirty --match 'v*' || echo 'dev') +GIT_COMMIT=$(shell git rev-parse HEAD || echo '?') +GIT_TREE=$(shell git diff-index --quiet HEAD -- && echo clean || echo dirty) +BUILD_DATE=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") + +LD_FLAGS+=-X github.com/target/goalert/app.gitCommit=$(GIT_COMMIT) +LD_FLAGS+=-X github.com/target/goalert/app.gitVersion=$(GIT_VERSION) +LD_FLAGS+=-X github.com/target/goalert/app.gitTreeState=$(GIT_TREE) +LD_FLAGS+=-X github.com/target/goalert/app.buildDate=$(BUILD_DATE) + + +ifdef LOG_DIR +RUNJSON_ARGS += -logs=$(LOG_DIR) +endif + +export CGO_ENABLED = 0 +export PATH := $(PWD)/bin:$(PATH) +export GOOS = $(shell go env GOOS) + +ifdef BUNDLE + GOFILES += web/inline_data_gen.go +endif + +all: test install + +$(BIN_DIR)/runjson: go.sum devtools/runjson/*.go + go build -o $@ ./devtools/$(@F) +$(BIN_DIR)/waitfor: go.sum devtools/waitfor/*.go + go build -o $@ ./devtools/$(@F) +$(BIN_DIR)/simpleproxy: go.sum devtools/simpleproxy/*.go + go build -o $@ ./devtools/$(@F) +$(BIN_DIR)/mockslack: go.sum $(shell find ./devtools/mockslack -name '*.go') + go build -o $@ ./devtools/mockslack/cmd/mockslack +$(BIN_DIR)/goalert: go.sum $(GOFILES) graphql2/mapconfig.go + go build -tags "$(BUILD_TAGS)" -ldflags "$(LD_FLAGS)" -o $@ ./cmd/goalert + +install: $(GOFILES) + go install -tags "$(BUILD_TAGS)" -ldflags "$(LD_FLAGS)" ./cmd/goalert + +cypress: bin/runjson bin/waitfor bin/simpleproxy bin/mockslack bin/goalert web/src/node_modules + web/src/node_modules/.bin/cypress install + +cy-wide: cypress web/src/build/vendorPackages.dll.js + CYPRESS_viewportWidth=1440 
CYPRESS_viewportHeight=900 bin/runjson $(RUNJSON_ARGS) >web/src/build/index.html + echo "" >>web/src/build/index.html + echo "" >>web/src/build/index.html + echo "" >>web/src/build/index.html + +web/inline_data_gen.go: web/src/build/index.html $(CFGPARAMS) $(INLINER) + go generate ./web + +web/src/build/vendorPackages.dll.js: web/src/node_modules web/src/webpack.dll.config.js + (cd web/src && node_modules/.bin/webpack --config ./webpack.dll.config.js --progress) + +config.json.bak: bin/goalert + (bin/goalert get-config "--db-url=$(DB_URL)" 2>/dev/null >config.json.new || echo '{"Auth":{"RefererURLs":["http://localhost:3030", "http://[::]:3030", "http://127.0.0.1:3030"]}}' >config.json.new) && mv config.json.new config.json.bak + +postgres: + docker run -d \ + --restart=always \ + -e POSTGRES_USER=goalert \ + --name goalert-postgres \ + -p 5432:5432 \ + postgres:11-alpine + +regendb: bin/goalert migrate/inline_data_gen.go config.json.bak + go run ./devtools/resetdb --with-rand-data + test -f config.json.bak && bin/goalert set-config --allow-empty-data-encryption-key "--db-url=$(DB_URL)" migrate/migrations/$(shell date +%Y%m%d%H%M%S)-$(NAME).sql + @echo "Created: migrate/migrations/$(shell date +%Y%m%d%H%M%S)-$(NAME).sql" diff --git a/README.md b/README.md new file mode 100644 index 0000000000..579f12d025 --- /dev/null +++ b/README.md @@ -0,0 +1,52 @@ +# GoAlert + +GoAlert is an on-call alerting platform written in Go. + +## All-In-One (demo) Container + +The quickest way to explore GoAlert is by using the GoAlert [all-in-one container](https://hub.docker.com/r/goalert/all-in-one). + +- Ensure you have Docker Desktop installed ([Mac](https://docs.docker.com/docker-for-mac/release-notes/) / [Windows](https://docs.docker.com/docker-for-windows/release-notes/)) +- `docker run -it --rm --name goalert-demo -p 8081:8081 goalert/all-in-one` + +Using a web browser, navigate to `http://localhost:8081` and log in with user `admin` and password `admin123`. 
+ +## Development + +Ensure you have docker, Go, node (and yarn), and make installed. + +- If you do not have Postgres installed/configured, first run `make postgres`, GoAlert is built and tested against Postgres 11. +- For the first start, run `make regendb` to migrate and add test data into the DB. This includes an admin user `admin/admin123`. +- To start GoAlert in development mode run `make start`. +- To build the GoAlert binary run `make bin/goalert BUNDLE=1`. + +### Automated Browser Tests + +To run automated browser tests, you can start Cypress in one of the following modes: + +- `make cy-wide` Widescreen format, in dev mode. +- `make cy-mobile` Mobile format, in dev mode. +- `make cy-wide-prod` Widescreen format, production build. +- `make cy-mobile-prod` Mobile format, production build. + +### Running Smoketests + +A suite of functional/behavioral tests are maintained for the backend code. These test various APIs and behaviors +of the GoAlert server component. + +Run the full suite with `make smoketest`. + +### Running Unit Tests + +All unit tests can be run with `make test`. + +UI Unit tests are found under the directory of the file being tested, with the same file name, appended with `.test.js`. They can be run independently with `make jest`. Watch mode can be enabled with `make jest JEST_ARGS=--watch`. + +### Setup Postgres + +By default, the development code expects a postgres server configured on port `5432`, with the user and DB `goalert`. + +Alternatively, you can run `make postgres` to configure one in a docker container. 
+ +- You can reset the dev database with `make resetdb` +- You can reset and generate random data with `make regendb`, this includes generating an admin user `admin/admin123` diff --git a/alert/alert.go b/alert/alert.go new file mode 100644 index 0000000000..5c37b5bf6c --- /dev/null +++ b/alert/alert.go @@ -0,0 +1,78 @@ +package alert + +import ( + "crypto/sha512" + "encoding/hex" + "github.com/target/goalert/validation/validate" + "strings" + "time" +) + +// maximum lengths +const ( + MaxSummaryLength = 118 + MaxDetailsLength = 6 * 1024 // 6KiB +) + +// An Alert represents an ongoing situation. +type Alert struct { + ID int `json:"_id"` + Status Status `json:"status"` + Summary string `json:"summary"` + Details string `json:"details"` + Source Source `json:"source"` + ServiceID string `json:"service_id"` + CreatedAt time.Time `json:"created_at"` + Dedup *DedupID `json:"dedup"` +} + +// DedupKey will return the de-duplication key for the alert. +// The Dedup prop is used if non-nil, otherwise one is generated +// using the Description of the Alert. 
+func (a *Alert) DedupKey() *DedupID { + if a.Dedup != nil { + return a.Dedup + } + + // fallback is auto:1: + sum := sha512.Sum512([]byte(a.Description())) + return &DedupID{ + Type: DedupTypeAuto, + Version: 1, + Payload: hex.EncodeToString(sum[:]), + } +} + +func (a *Alert) scanFrom(scanFn func(...interface{}) error) error { + return scanFn(&a.ID, &a.Summary, &a.Details, &a.ServiceID, &a.Source, &a.Status, &a.CreatedAt, &a.Dedup) +} + +func (a Alert) Normalize() (*Alert, error) { + if string(a.Source) == "" { + a.Source = SourceManual + } + if string(a.Status) == "" { + a.Status = StatusTriggered + } + a.Summary = strings.Replace(a.Summary, "\n", " ", -1) + a.Summary = strings.Replace(a.Summary, " ", " ", -1) + err := validate.Many( + validate.Text("Summary", a.Summary, 1, MaxSummaryLength), + validate.Text("Details", a.Details, 0, MaxDetailsLength), + validate.OneOf("Source", a.Source, SourceManual, SourceGrafana, SourceEmail, SourceGeneric), + validate.OneOf("Status", a.Status, StatusTriggered, StatusActive, StatusClosed), + validate.UUID("ServiceID", a.ServiceID), + ) + if err != nil { + return nil, err + } + + return &a, nil +} + +func (a Alert) Description() string { + if a.Details == "" { + return a.Summary + } + return a.Summary + "\n" + a.Details +} diff --git a/alert/alert_test.go b/alert/alert_test.go new file mode 100644 index 0000000000..fa1f4b7864 --- /dev/null +++ b/alert/alert_test.go @@ -0,0 +1,37 @@ +package alert + +import ( + "testing" +) + +func TestAlert_Normalize(t *testing.T) { + test := func(valid bool, a Alert) { + name := "valid" + if !valid { + name = "invalid" + } + t.Run(name, func(t *testing.T) { + t.Logf("%+v", a) + _, err := a.Normalize() + if valid && err != nil { + t.Errorf("got %v; want nil", err) + } else if !valid && err == nil { + + t.Errorf("got nil err; want non-nil") + } + }) + } + + valid := []Alert{ + {Summary: "Sample First Alert", Source: SourceManual, Status: StatusTriggered, ServiceID: 
"e93facc0-4764-012d-7bfb-002500d5d1a6"}, + } + invalid := []Alert{ + {ServiceID: "e93facc0-4764-012d-7bfb"}, + } + for _, a := range valid { + test(true, a) + } + for _, a := range invalid { + test(false, a) + } +} diff --git a/alert/dedup.go b/alert/dedup.go new file mode 100644 index 0000000000..5ba5e6d44a --- /dev/null +++ b/alert/dedup.go @@ -0,0 +1,85 @@ +package alert + +import ( + "database/sql/driver" + "fmt" + "github.com/target/goalert/validation/validate" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +// DedupType represents a type of dedup identifier. +type DedupType string + +// DedupType can be auto or user-generated. +const ( + DedupTypeUser = DedupType("user") + DedupTypeAuto = DedupType("auto") + DedupTypeHeartbeat = DedupType("heartbeat") +) + +// DedupID represents a de-duplication ID for alerts. +type DedupID struct { + Type DedupType + Version int + Payload string +} + +// ParseDedupString will parse a string into a DedupID struct. +func ParseDedupString(s string) (*DedupID, error) { + parts := strings.SplitN(s, ":", 3) + if len(parts) != 3 { + return nil, errors.New("invalid format") + } + vers, err := strconv.Atoi(parts[1]) + if err != nil { + return nil, err + } + return &DedupID{ + Type: DedupType(parts[0]), + Version: vers, + Payload: parts[2], + }, nil +} + +// Value implements the driver.Valuer interface. +func (d DedupID) Value() (driver.Value, error) { + return fmt.Sprintf("%s:%d:%s", d.Type, d.Version, d.Payload), nil +} + +// Scan implements the sql.Scanner interface. 
+func (d *DedupID) Scan(value interface{}) error { + var parsed *DedupID + var err error + switch t := value.(type) { + case []byte: + parsed, err = ParseDedupString(string(t)) + case string: + parsed, err = ParseDedupString(t) + case nil: + return errors.New("can't scan nil dedup id") + default: + return errors.Errorf("could not scan unknown type for DedupID(%T)", t) + } + if err != nil { + return err + } + + *d = *parsed + return nil +} + +// NewUserDedup will create a new DedupID from a user-provided string. +func NewUserDedup(str string) *DedupID { + str = validate.SanitizeText(str, 512) + if str == "" { + return nil + } + return &DedupID{ + Type: DedupTypeUser, + Version: 1, + Payload: str, + } +} diff --git a/alert/legacysearch.go b/alert/legacysearch.go new file mode 100644 index 0000000000..4034f5d153 --- /dev/null +++ b/alert/legacysearch.go @@ -0,0 +1,203 @@ +package alert + +import ( + "context" + "database/sql" + "fmt" + "github.com/target/goalert/permission" + "github.com/target/goalert/validation/validate" + "strings" + + "github.com/pkg/errors" +) + +// LegacySearchOptions contains criteria for filtering and sorting alerts. +type LegacySearchOptions struct { + // Search is matched case-insensitive against the alert summary, id and service name. + Search string + + // ServiceID, if specified, will restrict alerts to those with a matching ServiceID. + ServiceID string + + OmitTriggered bool + OmitActive bool + OmitClosed bool + + // Limit restricts the maximum number of rows returned. Default is 50. + // Note: Limit is applied AFTER offset is taken into account. + Limit int + + // Offset indicates the starting row of the returned results. + Offset int + + // SortBy specifies the column to sort by. If anything other than ID, + // ID is used as a secondary sort in descending (newest first) order. + SortBy SortBy + + // SortDesc controls ascending or descending results of the primary sort (SortBy field). 
+ SortDesc bool + + //FavoriteServicesOnlyUserID, if populated, filters all those alerts which belong to this user's favorite services, if empty, it is ignored. + FavoriteServicesOnlyUserID string +} + +// SortBy describes the possible primary sort options for alerts. +type SortBy int + +// Configurable sort columns. +const ( + SortByStatus SortBy = iota + SortByID + SortByCreatedTime + SortBySummary + SortByServiceName +) + +// We need to escape any characters that have meaning for `ILIKE` in Postgres. +// https://www.postgresql.org/docs/8.3/static/functions-matching.html +var searchEscape = strings.NewReplacer(`\`, `\\`, `%`, `\%`, `_`, `\_`) + +// LegacySearch will return a list of matching alerts, up to Limit, and the total number of matches +// available. +func (db *DB) LegacySearch(ctx context.Context, opts *LegacySearchOptions) ([]Alert, int, error) { + if opts == nil { + opts = &LegacySearchOptions{} + } + + userCheck := permission.User + if opts.FavoriteServicesOnlyUserID != "" { + userCheck = permission.MatchUser(opts.FavoriteServicesOnlyUserID) + } + err := permission.LimitCheckAny(ctx, permission.System, userCheck) + if err != nil { + return nil, 0, err + } + + if opts.Limit == 0 { + // default limit + opts.Limit = 50 + } + + rawSearch := opts.Search + if opts.Search != "" { + // match any substring matching the literal (escaped) search string + opts.Search = "%" + searchEscape.Replace(opts.Search) + "%" + } + err = validate.Many( + validate.Range("Limit", opts.Limit, 15, 50), + validate.Range("Offset", opts.Offset, 0, 1000000), + validate.OneOf("SortBy", opts.SortBy, SortByID, SortByStatus, SortByCreatedTime, SortBySummary, SortByServiceName), + validate.Text("Search", opts.Search, 0, 250), + ) + if opts.FavoriteServicesOnlyUserID != "" { + err = validate.Many(err, validate.UUID("FavoriteServicesOnlyUserID", opts.FavoriteServicesOnlyUserID)) + } + if err != nil { + return nil, 0, err + } + + whereStr := `WHERE + ($1 = '' or cast(a.id as text) = $6 or 
svc.name ilike $1 or a.summary ilike $1) and + ($2 = '' or a.service_id = cast($2 as UUID)) and + ( + ($3 and a.status = 'triggered') or + ($4 and a.status = 'active') or + ($5 and a.status = 'closed') + ) + ` + var buf strings.Builder + buf.WriteString("ORDER BY\ncase when cast(a.id as text) = $6 then 0 else 1 end,\n") + + idSortType := "DESC" + sortType := "ASC" + if opts.SortDesc { + sortType = "DESC" + } + switch opts.SortBy { + case SortByStatus: + buf.WriteString(fmt.Sprintf("a.status %s,\n", sortType)) + case SortByCreatedTime: + buf.WriteString(fmt.Sprintf("a.created_at %s,\n", sortType)) + case SortBySummary: + buf.WriteString(fmt.Sprintf("a.summary %s,\n", sortType)) + case SortByServiceName: + buf.WriteString(fmt.Sprintf("svc.name %s,\n", sortType)) + case SortByID: + if !opts.SortDesc { + idSortType = "ASC" + } + } + buf.WriteString(fmt.Sprintf("a.id %s\n", idSortType)) + + orderStr := buf.String() + + queryArgs := []interface{}{ + opts.Search, + opts.ServiceID, + !opts.OmitTriggered, + !opts.OmitActive, + !opts.OmitClosed, + rawSearch, + } + + var favServiceOnlyFilter string + // If FavoriteServicesOnlyFor userID is populated, use it for filtering + if opts.FavoriteServicesOnlyUserID != "" { + favServiceOnlyFilter = "JOIN user_favorites u ON u.tgt_service_id = a.service_id AND u.user_id = $7" + queryArgs = append(queryArgs, opts.FavoriteServicesOnlyUserID) + } + + totalQueryStr := ` + SELECT count(*) + FROM alerts a + JOIN services svc ON svc.id = a.service_id + ` + favServiceOnlyFilter + whereStr + + fetchQueryStr := fmt.Sprintf(` + SELECT + a.id, + a.summary, + a.details, + a.service_id, + a.source, + a.status, + a.created_at, + a.dedup_key + FROM alerts a + JOIN services svc ON svc.id = a.service_id + %s + %s + %s + LIMIT %d + OFFSET %d + `, favServiceOnlyFilter, whereStr, orderStr, opts.Limit, opts.Offset) + + tx, err := db.db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelRepeatableRead}) + if err != nil { + return nil, 0, err + } + defer 
tx.Rollback() + + var total int + err = tx.QueryRowContext(ctx, totalQueryStr, queryArgs...).Scan(&total) + if err != nil { + return nil, 0, errors.Wrap(err, "get total results") + } + rows, err := tx.QueryContext(ctx, fetchQueryStr, queryArgs...) + if err != nil { + return nil, 0, err + } + defer rows.Close() + + var result []Alert + for rows.Next() { + var a Alert + err = a.scanFrom(rows.Scan) + if err != nil { + return nil, 0, err + } + result = append(result, a) + } + + return result, total, nil +} diff --git a/alert/log.go b/alert/log.go new file mode 100644 index 0000000000..350918e635 --- /dev/null +++ b/alert/log.go @@ -0,0 +1,40 @@ +package alert + +import ( + "fmt" + "time" +) + +// A LogEvent represents a state change of an alert. +type LogEvent string + +// Types of LogEvents +const ( + LogEventCreated LogEvent = "created" + LogEventReopened LogEvent = "reopened" + LogEventClosed LogEvent = "closed" + LogEventStatusChanged LogEvent = "status_changed" + LogEventAssignmentChanged LogEvent = "assignment_changed" + LogEventEscalated LogEvent = "escalated" +) + +// A Log is a recording of an Alert event. +type Log struct { + Timestamp time.Time `json:"timestamp"` + Event LogEvent `json:"event"` + Message string `json:"message"` +} + +// Scan handles reading a Role from the DB format +func (r *LogEvent) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *r = LogEvent(t) + case string: + *r = LogEvent(t) + default: + return fmt.Errorf("could not process unknown type for role %T", t) + } + + return nil +} diff --git a/alert/log/entry.go b/alert/log/entry.go new file mode 100644 index 0000000000..469831944d --- /dev/null +++ b/alert/log/entry.go @@ -0,0 +1,27 @@ +package alertlog + +import ( + "time" +) + +// An Entry is an alert log entry. +type Entry interface { + // AlertID returns the ID of the alert the Entry belongs to. + AlertID() int + + // ID returns the ID of the log entry. 
+ ID() int + // Timestamp returns the time the Entry was created. + Timestamp() time.Time + + // Type returns type type of log entry. + Type() Type + + // Subject will return the subject, if available of the Entry. + Subject() *Subject + + // String returns the string representation of a log Event. + String() string + + Meta() interface{} +} diff --git a/alert/log/legacylogs.go b/alert/log/legacylogs.go new file mode 100644 index 0000000000..b8cda6f199 --- /dev/null +++ b/alert/log/legacylogs.go @@ -0,0 +1,108 @@ +package alertlog + +import ( + "regexp" + "strings" +) + +func (e *rawEntry) subjectFromMessage() *Subject { + switch e._type { + case TypeCreated: + return createdSubject(e.message) + case TypeNotificationSent: + return notifSentSubject(e.message) + case _TypeResponseReceived: + return respRecvSubject(e.message) + case _TypeStatusChanged: + return statChgSubject(e.message) + } + + return nil +} + +var ( + respRx = regexp.MustCompile(`^(Closed|Acknowledged) by (.*) via (SMS|VOICE)$`) + notifRx = regexp.MustCompile(`^Notification sent to (.*) via (SMS|VOICE)$`) + statRx = regexp.MustCompile(`^status changed to (active|closed) by (.*)( via UI)?$`) +) + +func statChgSubject(msg string) *Subject { + parts := statRx.FindStringSubmatch(msg) + if len(parts) == 0 { + return nil + } + return &Subject{ + Type: SubjectTypeUser, + Name: parts[2], + } +} + +func statChgType(msg string) Type { + if msg == "Status updated from active to triggered" { + return TypeEscalated + } else if msg == "Status updated from triggered to active" { + return TypeAcknowledged + } else if strings.HasPrefix(msg, "status changed to closed") { + return TypeClosed + } else if strings.HasPrefix(msg, "status changed to active") { + return TypeAcknowledged + } + return "" +} + +func respRecvType(msg string) Type { + parts := respRx.FindStringSubmatch(msg) + if len(parts) == 0 { + return "" + } + switch parts[1] { + case "Closed": + return TypeClosed + case "Acknowledged": + return 
TypeAcknowledged + } + return "" +} +func respRecvSubject(msg string) *Subject { + parts := respRx.FindStringSubmatch(msg) + if len(parts) == 0 { + return nil + } + if parts[3] == "VOICE" { + parts[3] = "Voice" + } + return &Subject{ + Type: SubjectTypeUser, + Name: parts[2], + Classifier: parts[3], + } +} + +func notifSentSubject(msg string) *Subject { + parts := notifRx.FindStringSubmatch(msg) + if len(parts) == 0 { + return nil + } + + if parts[2] == "VOICE" { + parts[2] = "Voice" + } + + return &Subject{ + Type: SubjectTypeUser, + Name: parts[1], + Classifier: parts[2], + } +} + +func createdSubject(msg string) *Subject { + switch msg { + case "Created via: grafana": + return &Subject{Type: SubjectTypeIntegrationKey, Classifier: "Grafana"} + case "Created via: manual": + return &Subject{Type: SubjectTypeUser, Classifier: "Web"} + case "Created via: generic": + return &Subject{Type: SubjectTypeIntegrationKey, Classifier: "Generic"} + } + return nil +} diff --git a/alert/log/meta.go b/alert/log/meta.go new file mode 100644 index 0000000000..88233292f1 --- /dev/null +++ b/alert/log/meta.go @@ -0,0 +1,14 @@ +package alertlog + +type EscalationMetaData struct { + NewStepIndex int + Repeat bool + Forced bool + Deleted bool + OldDelayMinutes int +} + +type NotificationMetaData struct { + UserID string + CMType string +} diff --git a/alert/log/rawentry.go b/alert/log/rawentry.go new file mode 100644 index 0000000000..038477ab72 --- /dev/null +++ b/alert/log/rawentry.go @@ -0,0 +1,170 @@ +package alertlog + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "github.com/target/goalert/util/log" + "time" +) + +type rawEntry struct { + id int + alertID int + timestamp time.Time + _type Type + message string + subject struct { + _type SubjectType + userID sql.NullString + userName sql.NullString + integrationKeyID sql.NullString + integrationKeyName sql.NullString + heartbeatMonitorID sql.NullString + heartbeatMonitorName sql.NullString + channelID 
sql.NullString + channelName sql.NullString + classifier string + } + meta rawJSON +} + +func (e rawEntry) Meta() interface{} { + switch e.Type() { + case TypeEscalated: + var esc EscalationMetaData + err := json.Unmarshal(e.meta, &esc) + if err != nil { + log.Debug(context.Background(), err) + return nil + } + return &esc + } + + return nil +} +func (e rawEntry) AlertID() int { + return e.alertID +} + +func (e rawEntry) ID() int { + return e.id +} + +func (e rawEntry) Timestamp() time.Time { + return e.timestamp +} +func (e rawEntry) Type() Type { + switch e._type { + case _TypeResponseReceived: + return respRecvType(e.message) + case _TypeStatusChanged: + return statChgType(e.message) + } + + return e._type +} + +func (e rawEntry) Subject() *Subject { + if e.subject._type == SubjectTypeNone { + if e.message != "" { + return e.subjectFromMessage() + } + return nil + } + + s := &Subject{ + Type: e.subject._type, + Classifier: e.subject.classifier, + } + + switch s.Type { + case SubjectTypeUser: + s.ID = e.subject.userID.String + s.Name = e.subject.userName.String + case SubjectTypeIntegrationKey: + s.ID = e.subject.integrationKeyID.String + s.Name = e.subject.integrationKeyName.String + case SubjectTypeHeartbeatMonitor: + s.ID = e.subject.heartbeatMonitorID.String + s.Name = e.subject.heartbeatMonitorName.String + case SubjectTypeChannel: + s.ID = e.subject.channelID.String + s.Name = e.subject.channelName.String + } + + return s +} + +func escalationMsg(m *EscalationMetaData) string { + msg := fmt.Sprintf(" to step #%d", m.NewStepIndex+1) + if m.Repeat { + msg += " (policy repeat)" + } + if m.Forced { + msg += " due to manual escalation" + } else if m.Deleted { + msg += " due to current step being deleted" + } else if m.OldDelayMinutes > 0 { + msg += fmt.Sprintf(" automatically after %d minutes", m.OldDelayMinutes) + } + + return msg +} + +func (e rawEntry) String() string { + var msg string + var infinitive bool + switch e.Type() { + case TypeCreated: + msg = 
"Created" + case TypeAcknowledged: + msg = "Acknowledged" + case TypeClosed: + msg = "Closed" + case TypeEscalated: + msg = "Escalated" + meta, ok := e.Meta().(*EscalationMetaData) + if ok { + msg += escalationMsg(meta) + } + case TypeNotificationSent: + msg = "Notification sent" + infinitive = true + case TypePolicyUpdated: + msg = "Policy updated" + case TypeDuplicateSupressed: + msg = "Suppressed duplicate: created" + case TypeEscalationRequest: + msg = "Escalation requested" + default: + return "Error" + } + + // include subject, if available + msg += subjectString(infinitive, e.Subject()) + + return msg +} + +func (e *rawEntry) scanWith(scan func(...interface{}) error) error { + return scan( + &e.id, + &e.alertID, + &e.timestamp, + &e._type, + &e.message, + &e.subject._type, + &e.subject.userID, + &e.subject.userName, + &e.subject.integrationKeyID, + &e.subject.integrationKeyName, + &e.subject.heartbeatMonitorID, + &e.subject.heartbeatMonitorName, + &e.subject.channelID, + &e.subject.channelName, + &e.subject.classifier, + &e.meta, + ) +} diff --git a/alert/log/rawjson.go b/alert/log/rawjson.go new file mode 100644 index 0000000000..c59ec9b8ea --- /dev/null +++ b/alert/log/rawjson.go @@ -0,0 +1,30 @@ +package alertlog + +import ( + "database/sql/driver" + "encoding/json" + "fmt" +) + +type rawJSON json.RawMessage + +func (r *rawJSON) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + buf := make([]byte, len(t)) + copy(buf, t) + *r = rawJSON(buf) + case nil: + + default: + return fmt.Errorf("could not process unknown type %T", t) + } + + return nil +} +func (r rawJSON) Value() (driver.Value, error) { + if len(r) == 0 { + return nil, nil + } + return []byte(r), nil +} diff --git a/alert/log/store.go b/alert/log/store.go new file mode 100644 index 0000000000..5a31965dcf --- /dev/null +++ b/alert/log/store.go @@ -0,0 +1,739 @@ +package alertlog + +import ( + "bytes" + "context" + "database/sql" + "encoding/json" + "fmt" + 
"github.com/target/goalert/integrationkey" + "github.com/target/goalert/notificationchannel" + "github.com/target/goalert/permission" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/util" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "strings" + "time" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +type Store interface { + FindOne(ctx context.Context, logID int) (Entry, error) + FindAll(ctx context.Context, alertID int) ([]Entry, error) + Log(ctx context.Context, alertID int, _type Type, meta interface{}) error + LogTx(ctx context.Context, tx *sql.Tx, alertID int, _type Type, meta interface{}) error + LogEPTx(ctx context.Context, tx *sql.Tx, epID string, _type Type, meta *EscalationMetaData) error + LogServiceTx(ctx context.Context, tx *sql.Tx, serviceID string, _type Type, meta interface{}) error + LogManyTx(ctx context.Context, tx *sql.Tx, alertIDs []int, _type Type, meta interface{}) error + FindLatestByType(ctx context.Context, alertID int, status Type) (Entry, error) + Search(ctx context.Context, opt *SearchOptions) ([]Entry, int, error) + + MustLog(ctx context.Context, alertID int, _type Type, meta interface{}) + MustLogTx(ctx context.Context, tx *sql.Tx, alertID int, _type Type, meta interface{}) +} + +var _ Store = &DB{} + +type DB struct { + db *sql.DB + + insert *sql.Stmt + insertEP *sql.Stmt + insertSvc *sql.Stmt + findAll *sql.Stmt + findAllByType *sql.Stmt + findOne *sql.Stmt + + lookupCallbackType *sql.Stmt + lookupIKeyType *sql.Stmt + lookupCMType *sql.Stmt + lookupNCTypeName *sql.Stmt + lookupHBInterval *sql.Stmt +} + +// SearchOptions contains criteria for filtering alert logs. At a minimum, at least one of AlertID or ServiceID must be specified. +type SearchOptions struct { + /// AlertID, if specified, will restrict alert logs to those with a matching AlertID. 
+ AlertID int + + // ServiceID, if specified, will restrict alert logs to those alerts which map to this particular ServiceID. + ServiceID string + + // UserID, if specified, will restrict alert logs to those with events performed by the specified user. + UserID string + + // IntegrationKeyID, if specified, will restrict alert logs to those with events authorized via the specified integration key. + IntegrationKeyID string + + // Start will restrict alert logs to those which were created on or after this time. + Start time.Time + + // End will restrict alert logs to those which were created before this time. + End time.Time + + // Event, if specified, will restrict alert logs to those of the specified event type. + Event Type + + // SortBy can be used to alter the primary sorting criteria. By default, results are ordered by timestamp as newest first. + // Results will always have a secondary sort criteria of newest-events-first, unless SortByTimestamp is set and SortDesc is false. + SortBy SortBy + + // SortDesc controls ascending or descending results of the primary sort (SortBy field). + SortDesc bool + + // Offset indicates the starting row of the returned results. + Offset int + + // Limit restricts the maximum number of rows returned. Default is 25. Maximum is 50. + // Note: Limit is applied AFTER Offset is taken into account. + Limit int +} + +// SortBy describes the possible primary sort options for alert logs. +type SortBy int + +// Configurable sort columns. 
+const ( + SortByTimestamp SortBy = iota + SortByAlertID + SortByEventType + SortByUserName +) + +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + db: db, + lookupCallbackType: p.P(` + select cm."type" + from outgoing_messages log + join user_contact_methods cm on cm.id = log.contact_method_id + where log.id = $1 + `), + lookupCMType: p.P(` + select "type" from user_contact_methods where id = $1 + `), + lookupNCTypeName: p.P(` + select "type", name from notification_channels where id = $1 + `), + lookupHBInterval: p.P(` + select extract(epoch from heartbeat_interval)/60 from heartbeat_monitors where id = $1 + `), + lookupIKeyType: p.P(`select "type" from integration_keys where id = $1`), + insertEP: p.P(` + insert into alert_logs ( + alert_id, + event, + sub_type, + sub_user_id, + sub_integration_key_id, + sub_hb_monitor_id, + sub_channel_id, + sub_classifier, + meta, + message + ) + select + a.id, $2, $3, $4, $5, $6, $7, $8, $9, $10 + from alerts a + join services svc on svc.id = a.service_id and svc.escalation_policy_id = ANY ($1) + where a.status != 'closed' + `), + insertSvc: p.P(` + insert into alert_logs ( + alert_id, + event, + sub_type, + sub_user_id, + sub_integration_key_id, + sub_hb_monitor_id, + sub_channel_id, + sub_classifier, + meta, + message + ) + select + a.id, $2, $3, $4, $5, $6, $7, $8, $9, $10 + from alerts a + where a.service_id = ANY ($1) and ( + ($2 = 'closed'::enum_alert_log_event and a.status != 'closed') or + ($2 = 'acknowledged'::enum_alert_log_event and a.status = 'triggered') + ) + `), + insert: p.P(` + insert into alert_logs ( + alert_id, + event, + sub_type, + sub_user_id, + sub_integration_key_id, + sub_hb_monitor_id, + sub_channel_id, + sub_classifier, + meta, + message + ) + SELECT unnest, $2, $3, $4, $5, $6, $7, $8, $9, $10 + FROM unnest($1::int[]) + `), + findOne: p.P(` + select + log.id, + log.alert_id, + log.timestamp, + log.event, + log.message, + 
log.sub_type, + log.sub_user_id, + usr.name, + log.sub_integration_key_id, + ikey.name, + log.sub_hb_monitor_id, + hb.name, + log.sub_channel_id, + nc.name, + log.sub_classifier, + log.meta + from alert_logs log + left join users usr on usr.id = log.sub_user_id + left join integration_keys ikey on ikey.id = log.sub_integration_key_id + left join heartbeat_monitors hb on hb.id = log.sub_hb_monitor_id + left join notification_channels nc on nc.id = log.sub_channel_id + where log.id = $1 + `), + findAll: p.P(` + select + log.id, + log.alert_id, + log.timestamp, + log.event, + log.message, + log.sub_type, + log.sub_user_id, + usr.name, + log.sub_integration_key_id, + ikey.name, + log.sub_hb_monitor_id, + hb.name, + log.sub_channel_id, + nc.name, + log.sub_classifier, + log.meta + from alert_logs log + left join users usr on usr.id = log.sub_user_id + left join integration_keys ikey on ikey.id = log.sub_integration_key_id + left join heartbeat_monitors hb on hb.id = log.sub_hb_monitor_id + left join notification_channels nc on nc.id = log.sub_channel_id + where log.alert_id = $1 + order by id + `), + findAllByType: p.P(` + select + log.id, + log.alert_id, + log.timestamp, + log.event, + log.message, + log.sub_type, + log.sub_user_id, + usr.name, + log.sub_integration_key_id, + ikey.name, + log.sub_hb_monitor_id, + hb.name, + log.sub_channel_id, + nc.name, + log.sub_classifier, + log.meta + from alert_logs log + left join users usr on usr.id = log.sub_user_id + left join integration_keys ikey on ikey.id = log.sub_integration_key_id + left join heartbeat_monitors hb on hb.id = log.sub_hb_monitor_id + left join notification_channels nc on nc.id = log.sub_channel_id + where log.alert_id = $1 and log.event = $2 + order by id DESC + limit 1 + `), + }, p.Err +} + +func (db *DB) MustLog(ctx context.Context, alertID int, _type Type, meta interface{}) { + db.MustLogTx(ctx, nil, alertID, _type, meta) +} +func (db *DB) MustLogTx(ctx context.Context, tx *sql.Tx, alertID int, _type 
Type, meta interface{}) { + err := db.LogTx(ctx, tx, alertID, _type, meta) + if err != nil { + log.Log(ctx, errors.Wrap(err, "append alert log")) + } +} +func (db *DB) LogEPTx(ctx context.Context, tx *sql.Tx, epID string, _type Type, meta *EscalationMetaData) error { + err := validate.UUID("EscalationPolicyID", epID) + if err != nil { + return err + } + return db.logAny(ctx, tx, db.insertEP, epID, _type, meta) +} +func (db *DB) LogServiceTx(ctx context.Context, tx *sql.Tx, serviceID string, _type Type, meta interface{}) error { + err := validate.UUID("ServiceID", serviceID) + if err != nil { + return err + } + t := _type + switch _type { + case TypeAcknowledged: + t = _TypeAcknowledgeAll + case TypeClosed: + t = _TypeCloseAll + } + return db.logAny(ctx, tx, db.insertSvc, serviceID, t, meta) +} + +func (db *DB) LogManyTx(ctx context.Context, tx *sql.Tx, alertIDs []int, _type Type, meta interface{}) error { + return db.logAny(ctx, tx, db.insert, alertIDs, _type, meta) +} + +func (db *DB) Log(ctx context.Context, alertID int, _type Type, meta interface{}) error { + return db.LogTx(ctx, nil, alertID, _type, meta) +} + +func (db *DB) LogTx(ctx context.Context, tx *sql.Tx, alertID int, _type Type, meta interface{}) error { + return db.logAny(ctx, tx, db.insert, alertID, _type, meta) +} +func txWrap(ctx context.Context, tx *sql.Tx, stmt *sql.Stmt) *sql.Stmt { + if tx == nil { + return stmt + } + return tx.StmtContext(ctx, stmt) +} +func (db *DB) logAny(ctx context.Context, tx *sql.Tx, insertStmt *sql.Stmt, id interface{}, _type Type, meta interface{}) error { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return err + } + + var classExtras []string + switch _type { + case _TypeAcknowledgeAll: + classExtras = append(classExtras, "Ack-All") + _type = TypeAcknowledged + case _TypeCloseAll: + classExtras = append(classExtras, "Close-All") + _type = TypeClosed + } + + var r rawEntry + r._type = _type + + if meta != nil { + r.meta, err = 
json.Marshal(meta) + if err != nil { + return err + } + } + + src := permission.Source(ctx) + if src != nil { + switch src.Type { + case permission.SourceTypeNotificationChannel: + r.subject._type = SubjectTypeChannel + var ncType notificationchannel.Type + var name string + err = txWrap(ctx, tx, db.lookupNCTypeName).QueryRowContext(ctx, src.ID).Scan(&ncType, &name) + if err != nil { + return errors.Wrap(err, "lookup contact method type for callback ID") + } + + switch ncType { + case notificationchannel.TypeSlack: + r.subject.classifier = "Slack" + } + r.subject.channelID.String = src.ID + r.subject.channelID.Valid = true + case permission.SourceTypeAuthProvider: + r.subject.classifier = "Web" + r.subject._type = SubjectTypeUser + + r.subject.userID.String = permission.UserID(ctx) + if r.subject.userID.String != "" { + r.subject.userID.Valid = true + } + case permission.SourceTypeContactMethod: + r.subject._type = SubjectTypeUser + var cmType contactmethod.Type + err = txWrap(ctx, tx, db.lookupCMType).QueryRowContext(ctx, src.ID).Scan(&cmType) + if err != nil { + return errors.Wrap(err, "lookup contact method type for callback ID") + } + switch cmType { + case contactmethod.TypeVoice: + r.subject.classifier = "Voice" + case contactmethod.TypeSMS: + r.subject.classifier = "SMS" + case contactmethod.TypeEmail: + r.subject.classifier = "Email" + } + r.subject.userID.String = permission.UserID(ctx) + if r.subject.userID.String != "" { + r.subject.userID.Valid = true + } + case permission.SourceTypeNotificationCallback: + r.subject._type = SubjectTypeUser + var cmType contactmethod.Type + err = txWrap(ctx, tx, db.lookupCallbackType).QueryRowContext(ctx, src.ID).Scan(&cmType) + if err != nil { + return errors.Wrap(err, "lookup contact method type for callback ID") + } + switch cmType { + case contactmethod.TypeVoice: + r.subject.classifier = "Voice" + case contactmethod.TypeSMS: + r.subject.classifier = "SMS" + case contactmethod.TypeEmail: + r.subject.classifier = 
"Email" + } + r.subject.userID.String = permission.UserID(ctx) + if r.subject.userID.String != "" { + r.subject.userID.Valid = true + } + case permission.SourceTypeHeartbeat: + r.subject._type = SubjectTypeHeartbeatMonitor + var minutes int + err = txWrap(ctx, tx, db.lookupHBInterval).QueryRowContext(ctx, src.ID).Scan(&minutes) + if err != nil { + return errors.Wrap(err, "lookup heartbeat monitor interval by ID") + } + if r.Type() == TypeCreated { + s := "s" + if minutes == 1 { + s = "" + } + r.subject.classifier = fmt.Sprintf("expired after %d minute"+s, minutes) + } else if r.Type() == TypeClosed { + r.subject.classifier = fmt.Sprintf("healthy") + } + r.subject.heartbeatMonitorID.Valid = true + r.subject.heartbeatMonitorID.String = src.ID + case permission.SourceTypeIntegrationKey: + r.subject._type = SubjectTypeIntegrationKey + var ikeyType integrationkey.Type + err = txWrap(ctx, tx, db.lookupIKeyType).QueryRowContext(ctx, src.ID).Scan(&ikeyType) + if err != nil { + return errors.Wrap(err, "lookup integration key type by ID") + } + switch ikeyType { + case integrationkey.TypeGeneric: + r.subject.classifier = "Generic API" + case integrationkey.TypeGrafana: + r.subject.classifier = "Grafana" + case integrationkey.TypeEmail: + r.subject.classifier = "Email" + } + r.subject.integrationKeyID.Valid = true + r.subject.integrationKeyID.String = src.ID + } + } + + if r.subject.classifier != "" { + classExtras = append([]string{r.subject.classifier}, classExtras...) 
+ } + r.subject.classifier = strings.Join(classExtras, ", ") + + var idArg interface{} + + switch t := id.(type) { + case string: + idArg = pq.StringArray{t} + case int: + idArg = pq.Int64Array{int64(t)} + case []int: + ids64 := make(pq.Int64Array, len(t)) + for i, id := range t { + ids64[i] = int64(id) + } + idArg = ids64 + default: + return errors.Errorf("invalid id type %T", t) + } + + _, err = txWrap(ctx, tx, insertStmt).ExecContext(ctx, idArg, _type, r.subject._type, r.subject.userID, r.subject.integrationKeyID, r.subject.heartbeatMonitorID, r.subject.channelID, r.subject.classifier, r.meta, r.String()) + return err +} +func (db *DB) FindOne(ctx context.Context, logID int) (Entry, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + var e rawEntry + row := db.findOne.QueryRowContext(ctx, logID) + err = e.scanWith(row.Scan) + if err != nil { + return nil, err + } + + return &e, nil +} +func (db *DB) FindAll(ctx context.Context, alertID int) ([]Entry, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + rows, err := db.findAll.QueryContext(ctx, alertID) + if err != nil { + return nil, err + } + defer rows.Close() + + var raw []rawEntry + var e rawEntry + for rows.Next() { + err := e.scanWith(rows.Scan) + if err != nil { + return nil, err + } + raw = append(raw, e) + } + + return dedupEvents(raw), nil +} + +func dedupEvents(raw []rawEntry) []Entry { + var cur Entry + var result []Entry + for _, e := range raw { + switch e.Type() { + case TypeCreated, TypeAcknowledged, TypeEscalationRequest, TypeEscalated: + // these are the ones we want to dedup + default: + if cur != nil { + result = append(result, cur) + cur = nil + } + result = append(result, e) + continue + } + if cur == nil { + cur = e + continue + } + + if e.Type() != cur.Type() { + result = append(result, cur) + cur = e + continue + } + + eSub := e.Subject() + if eSub == nil { + // no new subject 
info + continue + } + + cSub := cur.Subject() + if cSub == nil { + // old one has none, new one does + cur = e + continue + } + + // both have subjects, only replace if the new one + // has a classifier + if eSub.Classifier != "" { + cur = e + continue + } + } + if cur != nil { + result = append(result, cur) + } + + return result +} + +// FindLatestByType returns the latest Log Entry given alertID and status type +func (db *DB) FindLatestByType(ctx context.Context, alertID int, status Type) (Entry, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + var e rawEntry + row := db.findAllByType.QueryRowContext(ctx, alertID, status) + err = e.scanWith(row.Scan) + if err != nil { + return nil, err + } + return e, nil +} + +// Search will return a list of matching log entries +func (db *DB) Search(ctx context.Context, opts *SearchOptions) ([]Entry, int, error) { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User, permission.System) + if err != nil { + return nil, 0, err + } + + if opts.Limit == 0 { + // default limit + opts.Limit = 25 + } + + if opts.ServiceID == "" && opts.AlertID == 0 { + err = validation.NewFieldError("SearchOptions", "One of AlertID or ServiceID must be specified") + } + + err = validate.Many( + err, + validate.Range("Limit", opts.Limit, 1, 50), + validate.Range("Offset", opts.Offset, 0, 1000000), + validate.OneOf("SortBy", opts.SortBy, + SortByAlertID, + SortByEventType, + SortByTimestamp, + SortByUserName), + ) + if err != nil { + return nil, 0, err + } + + var buf bytes.Buffer + idSortType := "DESC" + // sortType only applies to user-specified parameter + sortType := "ASC" + if opts.SortDesc { + sortType = "DESC" + } + + buf.WriteString("ORDER BY ") + + switch opts.SortBy { + case SortByTimestamp: + if !opts.SortDesc { // if SortDesc is false + idSortType = "ASC" + } + case SortByAlertID: + buf.WriteString(fmt.Sprintf("a.alert_id %s,\n", sortType)) + case SortByEventType: + 
buf.WriteString(fmt.Sprintf("cast(a.event as text) %s,\n", sortType)) + case SortByUserName: + buf.WriteString(fmt.Sprintf("u.name %s,\n", sortType)) + } + + // idSortType is applied to both timestamp and id + buf.WriteString(fmt.Sprintf("a.timestamp %s,\n", idSortType)) + buf.WriteString(fmt.Sprintf("a.id %s\n", idSortType)) + + orderStr := buf.String() + // Refer to https://github.com/jackc/pgx/issues/281 on why to include a typecast before comparing to null + whereStr := `WHERE + ($1 = '0' or a.alert_id = $1 ::int) and + ($2 = '' or alerts.service_id = cast($2 as UUID)) and + (coalesce(a.timestamp >= cast($3 as timestamp with time zone), true)) and + (coalesce(a.timestamp < cast($4 as timestamp with time zone), true)) and + ($5 = '' or a.event = $5::enum_alert_log_event)and + ($6 = '' or a.sub_user_id = cast($6 as UUID)) and + ($7 = '' or a.sub_integration_key_id = cast($7 as UUID))` + + fetchQueryStr := fmt.Sprintf(` + SELECT + a.id, + a.alert_id, + a.timestamp, + a.event, + a.message, + a.sub_type, + a.sub_user_id, + u.name, + a.sub_integration_key_id, + i.name, + a.sub_hb_monitor_id, + hb.name, + a.sub_channel_id, + nc.name, + a.sub_classifier, + a.meta + FROM alert_logs a + LEFT JOIN alerts ON alerts.id = a.alert_id + LEFT JOIN users u ON u.id = a.sub_user_id + LEFT JOIN integration_keys i ON i.id = a.sub_integration_key_id + LEFT JOIN heartbeat_monitors hb ON hb.id = a.sub_hb_monitor_id + LEFT JOIN notification_channels nc ON nc.id = a.sub_channel_id + %s + %s + LIMIT %d + OFFSET %d + `, whereStr, orderStr, opts.Limit, opts.Offset) + + totalQueryStr := ` + SELECT count(*) + FROM alert_logs a + JOIN alerts ON alerts.id = a.alert_id + ` + whereStr + + tx, err := db.db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelRepeatableRead}) + if err != nil { + return nil, 0, err + } + defer tx.Rollback() + + var start, end pq.NullTime + if !opts.Start.IsZero() { + start.Valid = true + start.Time = opts.Start + } + if !opts.End.IsZero() { + end.Valid = true + 
end.Time = opts.End + } + + var total int + err = tx.QueryRowContext(ctx, totalQueryStr, + opts.AlertID, + opts.ServiceID, + start, + end, + opts.Event, + opts.UserID, + opts.IntegrationKeyID, + ).Scan(&total) + if err != nil { + return nil, 0, errors.Wrap(err, "get total results") + } + + rows, err := tx.QueryContext(ctx, fetchQueryStr, + opts.AlertID, + opts.ServiceID, + start, + end, + opts.Event, + opts.UserID, + opts.IntegrationKeyID, + ) + if err != nil { + return nil, 0, err + } + defer rows.Close() + + var result []rawEntry + + for rows.Next() { + var r rawEntry + err = r.scanWith(rows.Scan) + if err != nil { + return nil, 0, err + } + result = append(result, r) + } + var logs []Entry + for _, e := range result { + logs = append(logs, e) + } + + return logs, total, nil + +} diff --git a/alert/log/subject.go b/alert/log/subject.go new file mode 100644 index 0000000000..52ddeccc08 --- /dev/null +++ b/alert/log/subject.go @@ -0,0 +1,86 @@ +package alertlog + +import ( + "database/sql/driver" + "fmt" +) + +// SubjectType represents the type of subject or causer of an alert event. 
+type SubjectType string + +// Possible subject types +const ( + SubjectTypeUser SubjectType = "user" + SubjectTypeIntegrationKey SubjectType = "integration_key" + SubjectTypeHeartbeatMonitor SubjectType = "heartbeat_monitor" + SubjectTypeChannel SubjectType = "channel" + SubjectTypeNone SubjectType = "" +) + +// Scan handles reading a Type from the DB enum +func (s *SubjectType) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *s = SubjectType(t) + case string: + *s = SubjectType(t) + case nil: + *s = SubjectTypeNone + default: + return fmt.Errorf("could not process unknown type %T", t) + } + + return nil +} +func (s SubjectType) Value() (driver.Value, error) { + switch s { + case SubjectTypeUser, SubjectTypeIntegrationKey, SubjectTypeHeartbeatMonitor, SubjectTypeChannel: + return string(s), nil + default: + return nil, nil + } +} + +// A Subject is generally the causer of an event. If a user closes an alert, +// the entry would have a Subject set to the user. +type Subject struct { + ID string `json:"id"` + Name string `json:"name"` + Type SubjectType `json:"type"` + Classifier string `json:"classifier"` +} + +func subjectString(infinitive bool, s *Subject) string { + if s == nil { + return "" + } + var str string + if infinitive { + str += " to" + } else { + switch s.Type { + case SubjectTypeUser: + str += " by" + case SubjectTypeNone: + return "" + default: + str += " via" + } + } + if s.Name == "" { + str += " [unknown]" + } else { + str += " " + s.Name + } + switch s.Type { + case SubjectTypeIntegrationKey: + str += " integration" + case SubjectTypeHeartbeatMonitor: + str += " heartbeat monitor" + } + + if s.Classifier != "" { + str += " (" + s.Classifier + ")" + } + return str +} diff --git a/alert/log/type.go b/alert/log/type.go new file mode 100644 index 0000000000..5e2a9fb339 --- /dev/null +++ b/alert/log/type.go @@ -0,0 +1,42 @@ +package alertlog + +import "fmt" + +// A Type represents a log entry type for an alert. 
+type Type string + +// Types of Log Entries +const ( + TypeCreated Type = "created" + TypeClosed Type = "closed" + TypeNotificationSent Type = "notification_sent" + TypeEscalated Type = "escalated" + TypeAcknowledged Type = "acknowledged" + TypePolicyUpdated Type = "policy_updated" + TypeDuplicateSupressed Type = "duplicate_suppressed" + TypeEscalationRequest Type = "escalation_request" + + // not exported, status_changed will be turned into an acknowledged where appropriate + _TypeStatusChanged Type = "status_changed" + + // not exported, response_received will be turned into an ack or closed + _TypeResponseReceived Type = "response_received" + + // Mapped to Ack and Close + _TypeAcknowledgeAll Type = "ack_all" + _TypeCloseAll Type = "close_all" +) + +// Scan handles reading a Type from the DB enum +func (ty *Type) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *ty = Type(t) + case string: + *ty = Type(t) + default: + return fmt.Errorf("could not process unknown type %T", t) + } + + return nil +} diff --git a/alert/logentryfetcher.go b/alert/logentryfetcher.go new file mode 100644 index 0000000000..59c92d842b --- /dev/null +++ b/alert/logentryfetcher.go @@ -0,0 +1,49 @@ +package alert + +import ( + "context" + alertlog "github.com/target/goalert/alert/log" + + "github.com/pkg/errors" +) + +type LogEntryFetcher interface { + // LogEntry fetchs the latest log entry for a given alertID and type. 
+ LogEntry(ctx context.Context) (alertlog.Entry, error) +} + +type logError struct { + isAlreadyAcknowledged bool + isAlreadyClosed bool + alertID int + _type alertlog.Type + logDB alertlog.Store +} + +func (e logError) LogEntry(ctx context.Context) (alertlog.Entry, error) { + return e.logDB.FindLatestByType(ctx, e.alertID, e._type) +} + +func (e logError) Error() string { + if e.isAlreadyAcknowledged { + return "alert is already acknowledged" + } + if e.isAlreadyClosed { + return "alert is already closed" + } + return "unknown status update" +} + +func IsAlreadyAcknowledged(err error) bool { + if e, ok := errors.Cause(err).(logError); ok { + return e.isAlreadyAcknowledged + } + return false +} + +func IsAlreadyClosed(err error) bool { + if e, ok := errors.Cause(err).(logError); ok { + return e.isAlreadyClosed + } + return false +} diff --git a/alert/search.go b/alert/search.go new file mode 100644 index 0000000000..50e71ff99b --- /dev/null +++ b/alert/search.go @@ -0,0 +1,177 @@ +package alert + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/validation/validate" + "strconv" + "text/template" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// SearchOptions contains criteria for filtering and sorting alerts. +type SearchOptions struct { + // Search is matched case-insensitive against the alert summary, id and service name. + Search string `json:"s,omitempty"` + + // Status, if specified, will restrict alerts to those with a matching status. + Status []Status `json:"t,omitempty"` + + // Services, if specified, will restrict alerts to those with a matching ServiceID. + Services []string `json:"v,omitempty"` + + After SearchCursor `json:"a,omitempty"` + + // Omit specifies a list of alert IDs to exclude from the results. + Omit []int + + // Limit restricts the maximum number of rows returned. Default is 50. 
+ // Note: Limit is applied AFTER AfterID is taken into account. + Limit int `json:"-"` +} + +type SearchCursor struct { + ID int `json:"i,omitempty"` + Status Status `json:"s,omitempty"` +} + +var searchTemplate = template.Must(template.New("search").Parse(` + SELECT + a.id, + a.summary, + a.details, + a.service_id, + a.source, + a.status, + created_at, + a.dedup_key + FROM alerts a + {{ if .Search }} + JOIN services svc ON svc.id = a.service_id + {{ end }} + WHERE true + {{if .Omit}} + AND not a.id = any(:omit) + {{end}} + {{ if .Search }} + AND ( + a.summary ilike :search OR + svc.name ilike :search + ) + {{ end }} + {{ if .Status }} + AND a.status = any(:status::enum_alert_status[]) + {{ end }} + {{ if .Services }} + AND a.service_id = any(:services) + {{ end }} + {{ if .After.ID }} + AND ( + a.status > :afterStatus::enum_alert_status OR + (a.status = :afterStatus::enum_alert_status AND a.id < :afterID) + ) + {{ end }} + ORDER BY status, id DESC + LIMIT {{.Limit}} +`)) + +type renderData SearchOptions + +func (opts renderData) SearchStr() string { + if opts.Search == "" { + return "" + } + + return "%" + search.Escape(opts.Search) + "%" +} + +func (opts renderData) Normalize() (*renderData, error) { + if opts.Limit == 0 { + opts.Limit = search.DefaultMaxResults + } + + err := validate.Many( + validate.Text("Search", opts.Search, 0, search.MaxQueryLen), + validate.Range("Limit", opts.Limit, 0, search.MaxResults), + validate.Range("Status", len(opts.Status), 0, 3), + validate.ManyUUID("Services", opts.Services, 50), + validate.Range("Omit", len(opts.Omit), 0, 50), + ) + if opts.After.Status != "" { + err = validate.Many(err, validate.OneOf("After.Status", opts.After.Status, StatusTriggered, StatusActive, StatusClosed)) + } + if err != nil { + return nil, err + } + + for i, stat := range opts.Status { + err = validate.OneOf("Status["+strconv.Itoa(i)+"]", stat, StatusTriggered, StatusActive, StatusClosed) + if err != nil { + return nil, err + } + } + + return &opts, 
err +} + +func (opts renderData) QueryArgs() []sql.NamedArg { + stat := make(pq.StringArray, len(opts.Status)) + for i := range opts.Status { + stat[i] = string(opts.Status[i]) + } + omit := make(pq.Int64Array, len(opts.Omit)) + for i := range opts.Omit { + omit[i] = int64(opts.Omit[i]) + } + return []sql.NamedArg{ + sql.Named("search", opts.SearchStr()), + sql.Named("status", stat), + sql.Named("services", pq.StringArray(opts.Services)), + sql.Named("afterID", opts.After.ID), + sql.Named("afterStatus", opts.After.Status), + sql.Named("omit", pq.Int64Array(omit)), + } +} + +func (db *DB) Search(ctx context.Context, opts *SearchOptions) ([]Alert, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.User) + if err != nil { + return nil, err + } + if opts == nil { + opts = new(SearchOptions) + } + + data, err := (*renderData)(opts).Normalize() + if err != nil { + return nil, err + } + + query, args, err := search.RenderQuery(ctx, searchTemplate, data) + if err != nil { + return nil, errors.Wrap(err, "render query") + } + + rows, err := db.db.QueryContext(ctx, query, args...) + if err != nil { + return nil, errors.Wrap(err, "query") + } + defer rows.Close() + + alerts := make([]Alert, 0, opts.Limit) + + for rows.Next() { + var a Alert + err = errors.Wrap(a.scanFrom(rows.Scan), "scan") + if err != nil { + return nil, err + } + alerts = append(alerts, a) + } + + return alerts, nil +} diff --git a/alert/source.go b/alert/source.go new file mode 100644 index 0000000000..30aed8d3da --- /dev/null +++ b/alert/source.go @@ -0,0 +1,37 @@ +package alert + +import ( + "database/sql/driver" + "fmt" +) + +// Source is the entity that triggered an alert. 
+type Source string + +// Source types +const ( + SourceEmail Source = "email" // email alert + SourceGrafana Source = "grafana" // grafana alert + SourceManual Source = "manual" // manually triggered + SourceGeneric Source = "generic" // generic API +) + +func (s Source) Value() (driver.Value, error) { + str := string(s) + if str == "" { + str = string(SourceManual) + } + return str, nil +} + +func (s *Source) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *s = Source(t) + case string: + *s = Source(t) + default: + return fmt.Errorf("could not process unknown type for source %T", t) + } + return nil +} diff --git a/alert/state.go b/alert/state.go new file mode 100644 index 0000000000..f69f03a427 --- /dev/null +++ b/alert/state.go @@ -0,0 +1,13 @@ +package alert + +import ( + "time" +) + +// State represents the current escalation state of an alert. +type State struct { + AlertID int + StepNumber int + RepeatCount int + LastEscalation time.Time +} diff --git a/alert/status.go b/alert/status.go new file mode 100644 index 0000000000..b7d2d58c9c --- /dev/null +++ b/alert/status.go @@ -0,0 +1,38 @@ +package alert + +import ( + "database/sql/driver" + "fmt" +) + +// Status is the current state of an Alert. 
+type Status string + +// Alert status types +const ( + StatusTriggered Status = "triggered" + StatusActive Status = "active" + StatusClosed Status = "closed" +) + +func (s Status) Value() (driver.Value, error) { + str := string(s) + if str == "" { + str = string(StatusTriggered) + } + return str, nil +} + +func (s *Status) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *s = Status(t) + case string: + *s = Status(t) + case nil: + *s = StatusTriggered + default: + return fmt.Errorf("could not process unknown type for Status(%T)", t) + } + return nil +} diff --git a/alert/store.go b/alert/store.go new file mode 100644 index 0000000000..fee1f89432 --- /dev/null +++ b/alert/store.go @@ -0,0 +1,817 @@ +package alert + +import ( + "context" + "database/sql" + alertlog "github.com/target/goalert/alert/log" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "time" + + "github.com/lib/pq" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +const maxBatch = 500 + +type Store interface { + Manager + Create(context.Context, *Alert) (*Alert, error) + + // CreateOrUpdate will create an alert or log a "duplicate suppressed message" if + // Status is Triggered. If Status is Closed, it will close and return the result. + // + // In the case that Status is closed but a matching alert is not present, nil is returned. + // Otherwise the current alert is returned. + CreateOrUpdate(context.Context, *Alert) (*Alert, error) + + // CreateOrUpdateTx returns `isNew` to indicate if the returned alert was a new one. + // It is the caller's responsibility to log alert creation if the transaction is committed (and isNew is true). 
+ CreateOrUpdateTx(context.Context, *sql.Tx, *Alert) (a *Alert, isNew bool, err error) + + FindAllSummary(ctx context.Context) ([]Summary, error) + Escalate(ctx context.Context, alertID int, currentLevel int) error + EscalateMany(ctx context.Context, alertIDs []int) ([]int, error) + GetCreationTime(ctx context.Context, alertID int) (time.Time, error) + + LegacySearch(ctx context.Context, opt *LegacySearchOptions) ([]Alert, int, error) + Search(ctx context.Context, opts *SearchOptions) ([]Alert, error) + State(ctx context.Context, alertIDs []int) ([]State, error) +} +type Manager interface { + FindOne(context.Context, int) (*Alert, error) + FindMany(context.Context, []int) ([]Alert, error) + UpdateStatus(context.Context, int, Status) error + UpdateStatusByService(ctx context.Context, serviceID string, status Status) error + UpdateManyAlertStatus(ctx context.Context, status Status, alertIDs []int) (updatedAlertIDs []int, err error) + UpdateStatusTx(context.Context, *sql.Tx, int, Status) error + EPID(ctx context.Context, alertID int) (string, error) +} + +type DB struct { + db *sql.DB + logDB alertlog.Store + + insert *sql.Stmt + update *sql.Stmt + logs *sql.Stmt + findAllSummary *sql.Stmt + findMany *sql.Stmt + getCreationTime *sql.Stmt + getServiceID *sql.Stmt + + lockSvc *sql.Stmt + lockAlertSvc *sql.Stmt + + getStatusAndLockSvc *sql.Stmt + + createUpdNew *sql.Stmt + createUpdAck *sql.Stmt + createUpdClose *sql.Stmt + + updateByStatusAndService *sql.Stmt + updateByIDAndStatus *sql.Stmt + + epID *sql.Stmt + + escalate *sql.Stmt + epState *sql.Stmt +} + +// A Trigger signals that an alert needs to be processed +type Trigger interface { + TriggerAlert(int) +} + +func NewDB(ctx context.Context, db *sql.DB, logDB alertlog.Store) (*DB, error) { + prep := &util.Prepare{DB: db, Ctx: ctx} + + p := prep.P + + return &DB{ + db: db, + logDB: logDB, + + lockSvc: p(`select 1 from services where id = $1 for update`), + lockAlertSvc: p(`SELECT 1 FROM services s JOIN alerts a ON 
a.id = ANY ($1) AND s.id = a.service_id FOR UPDATE`), + getStatusAndLockSvc: p(` + SELECT a.status + FROM services s + JOIN alerts a on a.id = $1 and a.service_id = s.id + FOR UPDATE + `), + + epID: p(` + SELECT escalation_policy_id + FROM + services svc, + alerts a + WHERE svc.id = a.service_id AND a.id = $1 + `), + + insert: p(` + INSERT INTO alerts (summary, details, service_id, source, status, dedup_key) VALUES ($1, $2, $3, $4, $5, $6) RETURNING id, created_at + `), + update: p("UPDATE alerts SET status = $2 WHERE id = $1"), + logs: p("SELECT timestamp, event, message FROM alert_logs WHERE alert_id = $1"), + findAllSummary: p(` + with counts as ( + select count(id), status, service_id + from alerts + group by status, service_id + ) + select distinct + service_id, + svc.name, + (select count from counts c where status = 'triggered' and c.service_id = cn.service_id) triggered, + (select count from counts c where status = 'active' and c.service_id = cn.service_id) active, + (select count from counts c where status = 'closed' and c.service_id = cn.service_id) closed + from counts cn + join services svc on svc.id = service_id + order by triggered desc nulls last, active desc nulls last, closed desc nulls last, service_id + limit 50 + `), + + findMany: p(` + SELECT + a.id, + a.summary, + a.details, + a.service_id, + a.source, + a.status, + created_at, + a.dedup_key + FROM alerts a + WHERE a.id = ANY ($1) + `), + createUpdNew: p(` + WITH existing as ( + SELECT id, summary, details, status, source, created_at, false + FROM alerts + WHERE service_id = $3 AND dedup_key = $5 + ), to_insert as ( + SELECT 1 + EXCEPT + SELECT 1 + FROM existing + ), inserted as ( + INSERT INTO alerts ( + summary, details, service_id, source, dedup_key + ) + SELECT $1, $2, $3, $4, $5 + FROM to_insert + RETURNING id, summary, details, status, source, created_at, true + ) + SELECT * FROM existing + UNION + SELECT * FROM inserted + `), + createUpdAck: p(` + UPDATE alerts a + SET status = 'active' + FROM alerts 
old + WHERE + old.id = a.id AND + a.service_id = $1 AND + a.dedup_key = $2 AND + a.status != 'closed' + RETURNING a.id, a.summary, a.details, old.status, a.created_at + `), + createUpdClose: p(` + UPDATE alerts a + SET status = 'closed' + WHERE + service_id = $1 and + dedup_key = $2 and + status != 'closed' + RETURNING id, summary, details, created_at + `), + + getCreationTime: p("SELECT created_at FROM alerts WHERE id = $1"), + getServiceID: p("SELECT service_id FROM alerts WHERE id = $1"), + updateByStatusAndService: p(` + UPDATE + alerts + SET + status = $2 + WHERE + service_id = $1 + AND ( + $2 > status + ) + `), + updateByIDAndStatus: p(` + UPDATE alerts + SET status = $1 + WHERE + id = ANY ($2) AND + ($1 > status) + RETURNING id + `), + + escalate: p(` + UPDATE escalation_policy_state state + SET force_escalation = true + WHERE + state.alert_id = ANY($1) AND + state.force_escalation = false + RETURNING state.alert_id + `), + + epState: p(` + SELECT alert_id, last_escalation, loop_count, escalation_policy_step_number + FROM escalation_policy_state + WHERE alert_id = ANY ($1) + `), + }, prep.Err +} + +func (db *DB) EPID(ctx context.Context, alertID int) (string, error) { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return "", err + } + + row := db.epID.QueryRowContext(ctx, alertID) + var epID string + err = row.Scan(&epID) + if err != nil { + return "", err + } + return epID, nil +} + +func (db *DB) canTouchAlert(ctx context.Context, alertID int) error { + checks := []permission.Checker{ + permission.System, + permission.Admin, + permission.User, + } + if permission.Service(ctx) { + var serviceID string + err := db.getServiceID.QueryRowContext(ctx, alertID).Scan(&serviceID) + if err != nil { + return err + } + checks = append(checks, permission.MatchService(serviceID)) + } + + return permission.LimitCheckAny(ctx, checks...) 
+} + +func (db *DB) Escalate(ctx context.Context, alertID int, currentLevel int) error { + _, err := db.EscalateMany(ctx, []int{alertID}) + if err != nil { + return err + } + return nil +} + +func (db *DB) EscalateMany(ctx context.Context, alertIDs []int) ([]int, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.User) + if err != nil { + return nil, err + } + + if len(alertIDs) == 0 { + return nil, nil + } + + err = validate.Range("AlertIDs", len(alertIDs), 1, maxBatch) + if err != nil { + return nil, err + } + + ids64 := make(pq.Int64Array, len(alertIDs)) + for i, id := range alertIDs { + ids64[i] = int64(id) + } + + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + defer tx.Rollback() + + _, err = tx.StmtContext(ctx, db.lockAlertSvc).ExecContext(ctx, ids64) + if err != nil { + return nil, err + } + + rows, err := tx.StmtContext(ctx, db.escalate).QueryContext(ctx, ids64) + if err == sql.ErrNoRows { + log.Debugf(ctx, "escalate alert: no rows matched") + err = nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + updatedIDs := make([]int, 0, len(alertIDs)) + for rows.Next() { + var id int + err := rows.Scan(&id) + if err != nil { + return nil, err + } + updatedIDs = append(updatedIDs, id) + } + + err = db.logDB.LogManyTx(ctx, tx, updatedIDs, alertlog.TypeEscalationRequest, nil) + if err != nil { + return nil, err + } + + err = tx.Commit() + if err != nil { + return nil, err + } + return updatedIDs, err +} + +func (db *DB) UpdateStatusByService(ctx context.Context, serviceID string, status Status) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + err = validate.UUID("ServiceID", serviceID) + if err != nil { + return err + } + + err = validate.OneOf("Status", status, StatusActive, StatusClosed) + if err != nil { + return err + } + + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + 
+ t := alertlog.TypeAcknowledged + if status == StatusClosed { + t = alertlog.TypeClosed + } + + err = db.logDB.LogServiceTx(ctx, tx, serviceID, t, nil) + if err != nil { + return err + } + + _, err = tx.StmtContext(ctx, db.lockSvc).ExecContext(ctx, serviceID) + if err != nil { + return err + } + + _, err = tx.StmtContext(ctx, db.updateByStatusAndService).ExecContext(ctx, serviceID, status) + if err != nil { + return err + } + + return tx.Commit() +} + +func (db *DB) UpdateManyAlertStatus(ctx context.Context, status Status, alertIDs []int) ([]int, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.User) + if err != nil { + return nil, err + } + + if len(alertIDs) == 0 { + return nil, nil + } + + err = validate.Many( + validate.Range("AlertIDs", len(alertIDs), 1, maxBatch), + validate.OneOf("Status", status, StatusActive, StatusClosed), + ) + if err != nil { + return nil, err + } + + ids64 := make(pq.Int64Array, len(alertIDs)) + for i, k := range alertIDs { + ids64[i] = int64(k) + } + + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + defer tx.Rollback() + + t := alertlog.TypeAcknowledged + if status == StatusClosed { + t = alertlog.TypeClosed + } + + var updatedIDs []int + + _, err = tx.StmtContext(ctx, db.lockAlertSvc).ExecContext(ctx, ids64) + if err != nil { + return nil, err + } + + rows, err := tx.StmtContext(ctx, db.updateByIDAndStatus).QueryContext(ctx, status, ids64) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var id int + err = rows.Scan(&id) + if err != nil { + return nil, err + } + updatedIDs = append(updatedIDs, id) + } + + // Logging Batch Updates for every alertID whose status was updated + err = db.logDB.LogManyTx(ctx, tx, updatedIDs, t, nil) + if err != nil { + return nil, err + } + + err = tx.Commit() + if err != nil { + return nil, err + } + return updatedIDs, nil +} + +func (db *DB) Create(ctx context.Context, a *Alert) (*Alert, error) { + n, err := 
a.Normalize() // validation + if err != nil { + return nil, err + } + + if n.Status == StatusClosed { + return nil, validation.NewFieldError("Status", "Cannot create a closed alert.") + } + err = permission.LimitCheckAny(ctx, + permission.System, + permission.Admin, + permission.User, + permission.MatchService(a.ServiceID), + ) + if err != nil { + return nil, err + } + + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + defer tx.Rollback() + + _, err = tx.StmtContext(ctx, db.lockSvc).ExecContext(ctx, n.ServiceID) + if err != nil { + return nil, err + } + + n, err = db._create(ctx, tx, *n) + if err != nil { + return nil, err + } + + db.logDB.MustLogTx(ctx, tx, n.ID, alertlog.TypeCreated, nil) + + err = tx.Commit() + if err != nil { + return nil, err + } + + trace.FromContext(ctx).Annotate( + []trace.Attribute{ + trace.StringAttribute("service.id", n.ServiceID), + trace.Int64Attribute("alert.id", int64(n.ID)), + }, + "Alert created.", + ) + ctx = log.WithFields(ctx, log.Fields{"AlertID": n.ID, "ServiceID": n.ServiceID}) + log.Logf(ctx, "Alert created.") + + return n, nil +} +func (db *DB) _create(ctx context.Context, tx *sql.Tx, a Alert) (*Alert, error) { + row := tx.StmtContext(ctx, db.insert).QueryRowContext(ctx, a.Summary, a.Details, a.ServiceID, a.Source, a.Status, a.DedupKey()) + err := row.Scan(&a.ID, &a.CreatedAt) + if err != nil { + return nil, err + } + + return &a, nil +} +func (db *DB) CreateOrUpdateTx(ctx context.Context, tx *sql.Tx, a *Alert) (*Alert, bool, error) { + err := permission.LimitCheckAny(ctx, + permission.System, + permission.Admin, + permission.User, + permission.MatchService(a.ServiceID), + ) + if err != nil { + return nil, false, err + } + /* + - if new status is triggered, create or return existing + + - if new status is ack, old is trig, ack and return existing + - if new status is ack, old is ack, return existing + - if new status is ack, old is close, return nil + + - if new status is close, old is ack or 
trig, close, return existing + - if new status is close, old is close, return nil + */ + + n, err := a.Normalize() // validation + if err != nil { + return nil, false, err + } + + _, err = tx.StmtContext(ctx, db.lockSvc).ExecContext(ctx, n.ServiceID) + if err != nil { + return nil, false, err + } + + var inserted bool + var logType alertlog.Type + switch n.Status { + case StatusTriggered: + err = tx.Stmt(db.createUpdNew). + QueryRowContext(ctx, n.Summary, n.Details, n.ServiceID, n.Source, n.DedupKey()). + Scan(&n.ID, &n.Summary, &n.Details, &n.Status, &n.Source, &n.CreatedAt, &inserted) + if !inserted { + logType = alertlog.TypeDuplicateSupressed + } else { + logType = alertlog.TypeCreated + } + case StatusActive: + var oldStatus Status + err = tx.Stmt(db.createUpdAck). + QueryRowContext(ctx, n.ServiceID, n.DedupKey()). + Scan(&n.ID, &n.Summary, &n.Details, &oldStatus, &n.CreatedAt) + if oldStatus != n.Status { + logType = alertlog.TypeAcknowledged + } + case StatusClosed: + err = tx.Stmt(db.createUpdClose). + QueryRowContext(ctx, n.ServiceID, n.DedupKey()). 
+ Scan(&n.ID, &n.Summary, &n.Details, &n.CreatedAt) + logType = alertlog.TypeClosed + } + if err == sql.ErrNoRows { + // already closed/doesn't exist + return nil, false, nil + } + if err != nil { + return nil, false, err + } + if logType != "" { + db.logDB.MustLogTx(ctx, tx, n.ID, logType, nil) + } + + return n, inserted, nil +} + +func (db *DB) CreateOrUpdate(ctx context.Context, a *Alert) (*Alert, error) { + err := permission.LimitCheckAny(ctx, + permission.System, + permission.Admin, + permission.User, + permission.MatchService(a.ServiceID), + ) + if err != nil { + return nil, err + } + + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + defer tx.Rollback() + + n, isNew, err := db.CreateOrUpdateTx(ctx, tx, a) + if err != nil { + return nil, err + } + + err = tx.Commit() + if err != nil { + return nil, err + } + if n == nil { + return nil, nil + } + if isNew { + trace.FromContext(ctx).Annotate( + []trace.Attribute{ + trace.StringAttribute("service.id", n.ServiceID), + trace.Int64Attribute("alert.id", int64(n.ID)), + }, + "Alert created.", + ) + ctx = log.WithFields(ctx, log.Fields{"AlertID": n.ID, "ServiceID": n.ServiceID}) + log.Logf(ctx, "Alert created.") + } + + return n, nil +} + +func (db *DB) UpdateStatusTx(ctx context.Context, tx *sql.Tx, id int, s Status) error { + var stat Status + err := tx.Stmt(db.getStatusAndLockSvc).QueryRowContext(ctx, id).Scan(&stat) + if err != nil { + return err + } + if stat == StatusClosed { + return logError{isAlreadyClosed: true, alertID: id, _type: alertlog.TypeClosed, logDB: db.logDB} + } + if stat == StatusActive && s == StatusActive { + return logError{isAlreadyAcknowledged: true, alertID: id, _type: alertlog.TypeAcknowledged, logDB: db.logDB} + } + + _, err = tx.Stmt(db.update).ExecContext(ctx, id, s) + if err != nil { + return err + } + + if s == StatusClosed { + db.logDB.MustLogTx(ctx, tx, id, alertlog.TypeClosed, nil) + } else if s == StatusActive { + db.logDB.MustLogTx(ctx, tx, id, 
alertlog.TypeAcknowledged, nil) + } else if s != StatusTriggered { + log.Log(ctx, errors.Errorf("unknown/unhandled alert status update: %s", s)) + } + + return nil +} + +func (db *DB) UpdateStatus(ctx context.Context, id int, s Status) error { + err := validate.OneOf("Status", s, StatusTriggered, StatusActive, StatusClosed) + if err != nil { + return err + } + err = db.canTouchAlert(ctx, id) + if err != nil { + return err + } + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + err = db.UpdateStatusTx(ctx, tx, id, s) + if err != nil { + return err + } + return tx.Commit() +} + +func (db *DB) GetCreationTime(ctx context.Context, id int) (t time.Time, err error) { + err = permission.LimitCheckAny(ctx, permission.System, permission.Admin, permission.User) + if err != nil { + return t, err + } + + row := db.getCreationTime.QueryRowContext(ctx, id) + err = row.Scan(&t) + return t, err +} + +func (db *DB) FindAllSummary(ctx context.Context) ([]Summary, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + rows, err := db.findAllSummary.QueryContext(ctx) + if err != nil { + return nil, err + } + defer rows.Close() + + var s Summary + + var result []Summary + var unack, ack, clos sql.NullInt64 + for rows.Next() { + err = rows.Scan( + &s.ServiceID, + &s.ServiceName, + &unack, &ack, &clos, + ) + if err != nil { + return nil, err + } + s.Totals.Unack = int(unack.Int64) + s.Totals.Ack = int(ack.Int64) + s.Totals.Closed = int(clos.Int64) + + result = append(result, s) + } + + return result, nil +} + +func (db *DB) FindOne(ctx context.Context, id int) (*Alert, error) { + alerts, err := db.FindMany(ctx, []int{id}) + if err != nil { + return nil, err + } + // If alert is not found + if len(alerts) == 0 { + return nil, sql.ErrNoRows + } + return &alerts[0], nil +} + +func (db *DB) FindMany(ctx context.Context, ids []int) ([]Alert, error) { + err := permission.LimitCheckAny(ctx, 
permission.System, permission.User) + if err != nil { + return nil, err + } + if len(ids) == 0 { + return nil, nil + } + + err = validate.Range("AlertIDs", len(ids), 1, maxBatch) + if err != nil { + return nil, err + } + + ids64 := make(pq.Int64Array, len(ids)) + for i, id := range ids { + ids64[i] = int64(id) + } + rows, err := db.findMany.QueryContext(ctx, ids64) + if err != nil { + return nil, err + } + defer rows.Close() + + alerts := make([]Alert, 0, len(ids)) + + for rows.Next() { + var a Alert + err = a.scanFrom(rows.Scan) + if err != nil { + return nil, err + } + alerts = append(alerts, a) + } + + return alerts, nil +} + +func (db *DB) State(ctx context.Context, alertIDs []int) ([]State, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.User) + if err != nil { + return nil, err + } + + err = validate.Range("AlertIDs", len(alertIDs), 1, maxBatch) + if err != nil { + return nil, err + } + + ids64 := make(pq.Int64Array, len(alertIDs)) + for i, id := range alertIDs { + ids64[i] = int64(id) + } + + var t pq.NullTime + rows, err := db.epState.QueryContext(ctx, ids64) + if err == sql.ErrNoRows { + err = nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + list := make([]State, 0, len(alertIDs)) + for rows.Next() { + var s State + err = rows.Scan(&s.AlertID, &t, &s.RepeatCount, &s.StepNumber) + if t.Valid { + s.LastEscalation = t.Time + } + if err != nil { + return nil, err + } + list = append(list, s) + } + + return list, nil +} diff --git a/alert/summary.go b/alert/summary.go new file mode 100644 index 0000000000..d9b760726e --- /dev/null +++ b/alert/summary.go @@ -0,0 +1,11 @@ +package alert + +type Summary struct { + ServiceID string `json:"service_id"` + ServiceName string `json:"service_name"` + Totals struct { + Unack int `json:"unacknowledged"` + Ack int `json:"acknowledged"` + Closed int `json:"closed"` + } `json:"totals"` +} diff --git a/app/app.go b/app/app.go new file mode 100644 index 
0000000000..b1efed205b --- /dev/null +++ b/app/app.go @@ -0,0 +1,149 @@ +package app + +import ( + "database/sql" + "net" + "net/http" + + "github.com/target/goalert/alert" + alertlog "github.com/target/goalert/alert/log" + "github.com/target/goalert/app/lifecycle" + "github.com/target/goalert/auth" + "github.com/target/goalert/auth/nonce" + "github.com/target/goalert/config" + "github.com/target/goalert/engine" + "github.com/target/goalert/engine/resolver" + "github.com/target/goalert/escalation" + "github.com/target/goalert/graphql" + "github.com/target/goalert/graphql2/graphqlapp" + "github.com/target/goalert/heartbeat" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/keyring" + "github.com/target/goalert/label" + "github.com/target/goalert/limit" + "github.com/target/goalert/notification" + "github.com/target/goalert/notification/slack" + "github.com/target/goalert/notification/twilio" + "github.com/target/goalert/notificationchannel" + "github.com/target/goalert/oncall" + "github.com/target/goalert/override" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/service" + "github.com/target/goalert/timezone" + "github.com/target/goalert/user" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/user/favorite" + "github.com/target/goalert/user/notificationrule" + + "github.com/pkg/errors" +) + +// App represents an instance of the GoAlert application. 
+type App struct { + cfg appConfig + + mgr *lifecycle.Manager + + db *sql.DB + l net.Listener + + cooldown *cooldown + doneCh chan struct{} + + srv *http.Server + requestLock *contextLocker + startupErr error + + notificationManager *notification.Manager + engine *engine.Engine + graphql *graphql.Handler + graphql2 *graphqlapp.App + authHandler *auth.Handler + + twilioSMS *twilio.SMS + twilioVoice *twilio.Voice + twilioConfig *twilio.Config + + slackChan *slack.ChannelSender + + ConfigStore *config.Store + + AlertStore alert.Store + AlertlogStore alertlog.Store + + UserStore user.Store + ContactMethodStore contactmethod.Store + NotificationRuleStore notificationrule.Store + FavoriteStore favorite.Store + + ServiceStore service.Store + EscalationStore escalation.Store + IntegrationKeyStore integrationkey.Store + ScheduleRuleStore rule.Store + NotificationStore notification.Store + ScheduleStore schedule.Store + RotationStore rotation.Store + + OverrideStore override.Store + Resolver resolver.Resolver + LimitStore limit.Store + HeartbeatStore heartbeat.Store + + OAuthKeyring keyring.Keyring + SessionKeyring keyring.Keyring + + NonceStore nonce.Store + LabelStore label.Store + OnCallStore oncall.Store + NCStore notificationchannel.Store + TimeZoneStore *timezone.Store +} + +// NewApp constructs a new App and binds the listening socket. 
+func NewApp(c appConfig, db *sql.DB) (*App, error) { + l, err := net.Listen("tcp", c.ListenAddr) + if err != nil { + return nil, errors.Wrapf(err, "bind address %s", c.ListenAddr) + } + + app := &App{ + l: l, + db: db, + cfg: c, + doneCh: make(chan struct{}), + cooldown: newCooldown(c.KubernetesCooldown), + + requestLock: newContextLocker(), + } + + if c.StatusAddr != "" { + err = listenStatus(c.StatusAddr, app.doneCh) + if err != nil { + return nil, errors.Wrap(err, "start status listener") + } + } + + app.db.SetMaxIdleConns(c.DBMaxIdle) + app.db.SetMaxOpenConns(c.DBMaxOpen) + + app.mgr = lifecycle.NewManager(app._Run, app._Shutdown) + err = app.mgr.SetStartupFunc(app.startup) + if err != nil { + return nil, err + } + + return app, nil +} + +// Status returns the current lifecycle status of the App. +func (a *App) Status() lifecycle.Status { + return a.mgr.Status() +} + +// ActiveRequests returns the current number of active +// requests, not including pending ones during pause. +func (a *App) ActiveRequests() int { + return a.requestLock.RLockCount() +} diff --git a/app/appconfig.go b/app/appconfig.go new file mode 100644 index 0000000000..a4512525d3 --- /dev/null +++ b/app/appconfig.go @@ -0,0 +1,52 @@ +package app + +import ( + "time" + + "github.com/target/goalert/keyring" +) + +type appConfig struct { + ListenAddr string + Verbose bool + JSON bool + LogRequests bool + APIOnly bool + + DBMaxOpen int + DBMaxIdle int + + MaxReqBodyBytes int64 + MaxReqHeaderBytes int + + TwilioBaseURL string + SlackBaseURL string + + DBURL string + DBURLNext string + + JaegerEndpoint string + JaegerAgentEndpoint string + + StackdriverProjectID string + + TracingClusterName string + TracingPodNamespace string + TracingPodName string + TracingContainerName string + TracingNodeName string + + KubernetesCooldown time.Duration + StatusAddr string + + LogTraces bool + TraceProbability float64 + + EncryptionKeys keyring.Keys + + RegionName string + + StubNotifiers bool + + UIURL 
string +} diff --git a/app/clusterexporter.go b/app/clusterexporter.go new file mode 100644 index 0000000000..f177ab5353 --- /dev/null +++ b/app/clusterexporter.go @@ -0,0 +1,38 @@ +package app + +import "go.opencensus.io/trace" + +type clusterExporter struct { + *appConfig + e trace.Exporter +} + +func (c *appConfig) wrapExporter(e trace.Exporter) trace.Exporter { + if c.TracingClusterName == "" { + return e + } + return &clusterExporter{ + appConfig: c, + e: e, + } +} +func (c *clusterExporter) Flush() { + type flusher interface { + Flush() + } + if f, ok := c.e.(flusher); ok { + f.Flush() + } +} +func (c *clusterExporter) ExportSpan(s *trace.SpanData) { + if s.Attributes == nil { + s.Attributes = make(map[string]interface{}, 5) + } + s.Attributes["cluster_name"] = c.TracingClusterName + s.Attributes["namespace_id"] = c.TracingPodNamespace + s.Attributes["pod_id"] = c.TracingPodName + s.Attributes["container_name"] = c.TracingContainerName + s.Attributes["instance_id"] = c.TracingNodeName + + c.e.ExportSpan(s) +} diff --git a/app/cmd.go b/app/cmd.go new file mode 100644 index 0000000000..be2b2ecf99 --- /dev/null +++ b/app/cmd.go @@ -0,0 +1,618 @@ +package app + +import ( + "context" + "database/sql" + "fmt" + "io/ioutil" + "net/url" + "os" + "os/signal" + "runtime" + "strings" + "syscall" + "time" + + "github.com/lib/pq" + toml "github.com/pelletier/go-toml" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/target/goalert/auth/basic" + "github.com/target/goalert/keyring" + "github.com/target/goalert/migrate" + "github.com/target/goalert/permission" + "github.com/target/goalert/remotemonitor" + "github.com/target/goalert/sqltrace" + "github.com/target/goalert/switchover" + "github.com/target/goalert/switchover/dbsync" + "github.com/target/goalert/user" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation" + "go.opencensus.io/trace" + 
"golang.org/x/crypto/ssh/terminal" +) + +var shutdownSignalCh = make(chan os.Signal, 2) + +func init() { + signal.Notify(shutdownSignalCh, shutdownSignals...) +} + +// RootCmd is the configuration for running the app binary. +var RootCmd = &cobra.Command{ + Use: "goalert", + Short: "Alerting platform.", + RunE: func(cmd *cobra.Command, args []string) error { + + // update JSON output first + if viper.GetBool("json") { + log.EnableJSON() + } + if viper.GetBool("verbose") { + log.EnableVerbose() + } + + err := viper.ReadInConfig() + // ignore file not found error + if _, ok := err.(viper.ConfigFileNotFoundError); err != nil && !ok { + return errors.Wrap(err, "read config") + } + + ctx := context.Background() + cfg, err := getConfig() + if err != nil { + return err + } + exporters, err := configTracing(ctx, cfg) + if err != nil { + return errors.Wrap(err, "config tracing") + } + + defer func() { + // flush exporters + type flusher interface { + Flush() + } + for _, e := range exporters { + if f, ok := e.(flusher); ok { + f.Flush() + } + } + }() + + wrappedDriver := sqltrace.WrapDriver(&pq.Driver{}, &sqltrace.WrapOptions{Query: true, Args: true}) + + u, err := url.Parse(cfg.DBURL) + if err != nil { + return errors.Wrap(err, "parse old URL") + } + q := u.Query() + if cfg.DBURLNext != "" { + q.Set("application_name", "GoAlert (Switch-Over Mode)") + } else { + q.Set("application_name", "GoAlert") + } + u.RawQuery = q.Encode() + cfg.DBURL = u.String() + + dbc, err := wrappedDriver.OpenConnector(cfg.DBURL) + if err != nil { + return errors.Wrap(err, "connect to postgres") + } + var db *sql.DB + var h *switchover.Handler + if cfg.DBURLNext != "" { + u, err := url.Parse(cfg.DBURLNext) + if err != nil { + return errors.Wrap(err, "parse next URL") + } + q := u.Query() + q.Set("application_name", "GoAlert (Switch-Over Mode)") + u.RawQuery = q.Encode() + cfg.DBURLNext = u.String() + + dbcNext, err := wrappedDriver.OpenConnector(cfg.DBURLNext) + if err != nil { + return 
errors.Wrap(err, "connect to postgres (next)") + } + h, err = switchover.NewHandler(ctx, dbc, dbcNext, cfg.DBURL, cfg.DBURLNext) + if err != nil { + return errors.Wrap(err, "init changeover handler") + } + db = h.DB() + } else { + db = sql.OpenDB(dbc) + } + + app, err := NewApp(cfg, db) + if err != nil { + return errors.Wrap(err, "init app") + } + if h != nil { + h.SetApp(app) + } + + go handleShutdown(ctx, func(ctx context.Context) error { + if h != nil { + h.Abort() + } + return app.Shutdown(ctx) + }) + + // trigger engine cycles by process signal + trigCh := make(chan os.Signal, 1) + signal.Notify(trigCh, triggerSignals...) + go func() { + for range trigCh { + app.Trigger() + } + }() + + return errors.Wrap(app.Run(ctx), "run app") + }, +} + +func handleShutdown(ctx context.Context, fn func(ctx context.Context) error) { + <-shutdownSignalCh + log.Logf(ctx, "Application attempting graceful shutdown.") + sCtx, cancel := context.WithTimeout(ctx, shutdownTimeout) + defer cancel() + sCtx, sp := trace.StartSpan(sCtx, "Shutdown") + defer sp.End() + go func() { + <-shutdownSignalCh + log.Logf(ctx, "Second signal received, terminating immediately") + sp.Annotate([]trace.Attribute{trace.BoolAttribute("shutdown.force", true)}, "Second signal received.") + cancel() + }() + + err := fn(sCtx) + if err != nil { + sp.Annotate([]trace.Attribute{trace.BoolAttribute("error", true)}, err.Error()) + } +} + +var ( + versionCmd = &cobra.Command{ + Use: "version", + Short: "Output the current version.", + RunE: func(cmd *cobra.Command, args []string) error { + + date := buildDate + t, err := time.Parse(time.RFC3339, date) + if err == nil { + date = t.Local().Format(time.RFC3339) + } + + migrations := migrate.Names() + + fmt.Printf(`Version: %s +GitCommit: %s (%s) +BuildDate: %s +GoVersion: %s (%s) +Platform: %s/%s +Migration: %s (#%d) +`, gitVersion, + gitCommit, gitTreeState, + date, + runtime.Version(), runtime.Compiler, + runtime.GOOS, runtime.GOARCH, + migrations[len(migrations)-1], 
len(migrations), + ) + + return nil + }, + } + + switchCmd = &cobra.Command{ + Use: "switchover-shell", + Short: "Start the switchover shell, used to initiate, control, and monitor a DB switchover operation.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := getConfig() + if err != nil { + return err + } + + if cfg.DBURLNext == "" { + return validation.NewFieldError("DBURLNext", "must not be empty for switchover") + } + + return dbsync.RunShell(cfg.DBURL, cfg.DBURLNext) + }, + } + + monitorCmd = &cobra.Command{ + Use: "monitor", + Short: "Start a remote-monitoring process that functionally tests alerts.", + RunE: func(cmd *cobra.Command, args []string) error { + file := viper.GetString("config-file") + if file == "" { + return errors.New("config file is required") + } + + t, err := toml.LoadFile(file) + if err != nil { + return err + } + + var cfg remotemonitor.Config + err = t.Unmarshal(&cfg) + if err != nil { + return err + } + + mon, err := remotemonitor.NewMonitor(cfg) + if err != nil { + return err + } + + handleShutdown(context.Background(), mon.Shutdown) + return nil + }, + } + + exportCmd = &cobra.Command{ + Use: "export-migrations", + Short: "Export all migrations as .sql files. 
Use --export-dir to control the destination.", + RunE: func(cmd *cobra.Command, args []string) error { + // update JSON output first + if viper.GetBool("json") { + log.EnableJSON() + } + if viper.GetBool("verbose") { + log.EnableVerbose() + } + + err := viper.ReadInConfig() + // ignore file not found error + if _, ok := err.(viper.ConfigFileNotFoundError); err != nil && !ok { + return errors.Wrap(err, "read config") + } + + return migrate.DumpMigrations(viper.GetString("export-dir")) + }, + } + + migrateCmd = &cobra.Command{ + Use: "migrate", + Short: "Perform migration(s), then exit.", + RunE: func(cmd *cobra.Command, args []string) error { + if viper.GetBool("verbose") { + log.EnableVerbose() + } + + err := viper.ReadInConfig() + // ignore file not found error + if _, ok := err.(viper.ConfigFileNotFoundError); err != nil && !ok { + return errors.Wrap(err, "read config") + } + + c, err := getConfig() + if err != nil { + return err + } + db, err := sql.Open("postgres", c.DBURL) + if err != nil { + return errors.Wrap(err, "connect to postgres") + } + defer db.Close() + + ctx := context.Background() + down := viper.GetString("down") + up := viper.GetString("up") + if down != "" { + n, err := migrate.Down(ctx, db, down) + + if err != nil { + return errors.Wrap(err, "apply DOWN migrations") + } + if n > 0 { + log.Debugf(context.TODO(), "Applied %d DOWN migrations.", n) + } + } + + if up != "" || down == "" { + n, err := migrate.Up(ctx, db, up) + + if err != nil { + return errors.Wrap(err, "apply UP migrations") + } + if n > 0 { + log.Debugf(context.TODO(), "Applied %d UP migrations.", n) + } + } + + return nil + }, + } + + setConfigCmd = &cobra.Command{ + Use: "set-config", + Short: "Sets current config values in the DB from stdin.", + RunE: func(cmd *cobra.Command, args []string) error { + + if viper.GetString("data-encryption-key") == "" && !viper.GetBool("allow-empty-data-encryption-key") { + return validation.NewFieldError("data-encryption-key", "Must not be empty, 
or set --allow-empty-data-encryption-key") + } + var data []byte + if viper.GetString("data") != "" { + data = []byte(viper.GetString("data")) + } else { + if terminal.IsTerminal(int(os.Stdin.Fd())) { + // Only print message if we're not piping + fmt.Println("Enter or paste config data (JSON), then press CTRL+D when done or CTRL+C to quit.") + } + intCh := make(chan os.Signal, 1) + doneCh := make(chan struct{}) + signal.Notify(intCh, os.Interrupt) + go func() { + select { + case <-intCh: + os.Exit(1) + case <-doneCh: + } + }() + + var err error + data, err = ioutil.ReadAll(os.Stdin) + close(doneCh) + if err != nil { + return errors.Wrap(err, "read stdin") + } + } + + return getSetConfig(true, data) + }, + } + + getConfigCmd = &cobra.Command{ + Use: "get-config", + Short: "Gets current config values.", + RunE: func(cmd *cobra.Command, args []string) error { + return getSetConfig(false, nil) + }, + } + + addUserCmd = &cobra.Command{ + Use: "add-user", + Short: "Adds a user for basic authentication.", + RunE: func(cmd *cobra.Command, args []string) error { + if viper.GetBool("verbose") { + log.EnableVerbose() + } + + err := viper.ReadInConfig() + // ignore file not found error + if _, ok := err.(viper.ConfigFileNotFoundError); err != nil && !ok { + return errors.Wrap(err, "read config") + } + + c, err := getConfig() + if err != nil { + return err + } + db, err := sql.Open("postgres", c.DBURL) + if err != nil { + return errors.Wrap(err, "connect to postgres") + } + defer db.Close() + + ctx := permission.SystemContext(context.Background(), "AddUser") + + basicStore, err := basic.NewStore(ctx, db) + if err != nil { + return errors.Wrap(err, "init basic auth store") + } + + pass := cmd.Flag("pass").Value.String() + id := cmd.Flag("user-id").Value.String() + username := cmd.Flag("user").Value.String() + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return errors.Wrap(err, "begin tx") + } + defer tx.Rollback() + + if id == "" { + u := &user.User{ + Name: username, + 
Email: cmd.Flag("email").Value.String(), + Role: permission.RoleUser, + } + if cmd.Flag("admin").Value.String() == "true" { + u.Role = permission.RoleAdmin + } + userStore, err := user.NewDB(ctx, db) + if err != nil { + return errors.Wrap(err, "init user store") + } + u, err = userStore.InsertTx(ctx, tx, u) + if err != nil { + return errors.Wrap(err, "create user") + } + id = u.ID + } + + if pass == "" { + fmt.Printf("New Password: ") + p, err := terminal.ReadPassword(syscall.Stdin) + if err != nil { + return errors.Wrap(err, "get password") + } + pass = string(p) + fmt.Printf("\n'%s'\n", pass) + } + + err = basicStore.CreateTx(ctx, tx, id, username, pass) + if err != nil { + return errors.Wrap(err, "add basic auth entry") + } + + err = tx.Commit() + if err != nil { + return errors.Wrap(err, "commit tx") + } + + log.Logf(ctx, "Username '%s' added.", username) + + return nil + }, + } +) + +// getConfig will load the current configuration from viper +func getConfig() (appConfig, error) { + cfg := appConfig{ + JSON: viper.GetBool("json"), + LogRequests: viper.GetBool("log-requests"), + Verbose: viper.GetBool("verbose"), + APIOnly: viper.GetBool("api-only"), + + DBMaxOpen: viper.GetInt("db-max-open"), + DBMaxIdle: viper.GetInt("db-max-idle"), + + MaxReqBodyBytes: viper.GetInt64("max-request-body-bytes"), + MaxReqHeaderBytes: viper.GetInt("max-request-header-bytes"), + + ListenAddr: viper.GetString("listen"), + + SlackBaseURL: viper.GetString("slack-base-url"), + TwilioBaseURL: viper.GetString("twilio-base-url"), + + DBURL: viper.GetString("db-url"), + DBURLNext: viper.GetString("db-url-next"), + + JaegerEndpoint: viper.GetString("jaeger-endpoint"), + JaegerAgentEndpoint: viper.GetString("jaeger-agent-endpoint"), + + StackdriverProjectID: viper.GetString("stackdriver-project-id"), + + TracingClusterName: viper.GetString("tracing-cluster-name"), + TracingPodNamespace: viper.GetString("tracing-pod-namespace"), + TracingPodName: viper.GetString("tracing-pod-name"), + 
TracingContainerName: viper.GetString("tracing-container-name"), + TracingNodeName: viper.GetString("tracing-node-name"), + TraceProbability: viper.GetFloat64("tracing-probability"), + + KubernetesCooldown: viper.GetDuration("kubernetes-cooldown"), + StatusAddr: viper.GetString("status-addr"), + + EncryptionKeys: keyring.Keys{[]byte(viper.GetString("data-encryption-key")), []byte(viper.GetString("data-encryption-key-old"))}, + + RegionName: viper.GetString("region-name"), + + StubNotifiers: viper.GetBool("stub-notifiers"), + + UIURL: viper.GetString("ui-url"), + } + + if cfg.DBURL == "" { + return cfg, validation.NewFieldError("db-url", "is required") + } + + if viper.GetBool("stack-traces") { + log.EnableStacks() + } + + return cfg, nil +} + +func init() { + RootCmd.Flags().StringP("listen", "l", "localhost:8081", "Listen address:port for the application.") + + RootCmd.Flags().Bool("api-only", false, "Starts in API-only mode (schedules & notifications will not be processed). Useful in clusters.") + + RootCmd.Flags().Int("db-max-open", 15, "Max open DB connections.") + RootCmd.Flags().Int("db-max-idle", 5, "Max idle DB connections.") + + RootCmd.Flags().Int64("max-request-body-bytes", 32768, "Max body size for all incoming requests (in bytes). Set to 0 to disable limit.") + RootCmd.Flags().Int("max-request-header-bytes", 4096, "Max header size for all incoming requests (in bytes). Set to 0 to disable limit.") + + RootCmd.Flags().String("github-base-url", "", "Base URL for GitHub auth and API calls.") + RootCmd.Flags().String("twilio-base-url", "", "Override the Twilio API URL.") + RootCmd.Flags().String("slack-base-url", "", "Override the Slack base URL.") + + RootCmd.Flags().String("region-name", "default", "Name of region for message processing (case sensitive). 
Only one instance per-region-name will process outgoing messages.") + + RootCmd.PersistentFlags().String("db-url", "", "Connection string for Postgres.") + RootCmd.PersistentFlags().String("db-url-next", "", "Connection string for the *next* Postgres server (enables DB switch-over mode).") + + RootCmd.Flags().String("jaeger-endpoint", "", "Jaeger HTTP Thrift endpoint") + RootCmd.Flags().String("jaeger-agent-endpoint", "", "Instructs Jaeger exporter to send spans to jaeger-agent at this address.") + RootCmd.Flags().String("stackdriver-project-id", "", "Project ID for Stackdriver. Enables tracing output to Stackdriver.") + RootCmd.Flags().String("tracing-cluster-name", "", "Cluster name to use for tracing (i.e. kubernetes, Stackdriver/GKE environment).") + RootCmd.Flags().String("tracing-pod-namespace", "", "Pod namespace to use for tracing.") + RootCmd.Flags().String("tracing-pod-name", "", "Pod name to use for tracing.") + RootCmd.Flags().String("tracing-container-name", "", "Container name to use for tracing.") + RootCmd.Flags().String("tracing-node-name", "", "Node name to use for tracing.") + RootCmd.Flags().Float64("tracing-probability", 0.01, "Probability of a new trace to be recorded.") + + RootCmd.Flags().Duration("kubernetes-cooldown", 0, "Cooldown period, from the last TCP connection, before terminating the listener when receiving a shutdown signal.") + RootCmd.Flags().String("status-addr", "", "Open a port to emit status updates. Connections are closed when the server shuts down. Can be used to keep containers running until GoAlert has exited.") + + RootCmd.PersistentFlags().String("data-encryption-key", "", "Encryption key for sensitive data like signing keys. Used for encrypting new and decrypting existing data.") + RootCmd.PersistentFlags().String("data-encryption-key-old", "", "Fallback key. 
Used for decrypting existing data only.") + RootCmd.PersistentFlags().Bool("stack-traces", false, "Enables stack traces with all error logs.") + + RootCmd.Flags().Bool("stub-notifiers", false, "If true, notification senders will be replaced with a stub notifier that always succeeds (useful for staging/sandbox environments).") + + RootCmd.PersistentFlags().BoolP("verbose", "v", false, "Enable verbose logging.") + RootCmd.Flags().Bool("log-requests", false, "Log all HTTP requests. If false, requests will be logged for debug/trace contexts only.") + RootCmd.PersistentFlags().Bool("json", false, "Log in JSON format.") + + RootCmd.Flags().String("ui-url", "", "Proxy UI requests to an alternate host. Default is to serve bundled assets from memory.") + + migrateCmd.Flags().String("up", "", "Target UP migration to apply.") + migrateCmd.Flags().String("down", "", "Target DOWN migration to roll back to.") + exportCmd.Flags().String("export-dir", "migrations", "Destination dir for export. If it does not exist, it will be created.") + + addUserCmd.Flags().String("user-id", "", "If specified, the auth entry will be created for an existing user ID. 
Default is to create a new user.") + addUserCmd.Flags().String("pass", "", "Specify new users password (if blank, prompt will be given).") + addUserCmd.Flags().String("user", "", "Specifies the login username.") + addUserCmd.Flags().String("email", "", "Specifies the email address of the new user (ignored if user-id is provided).") + addUserCmd.Flags().Bool("admin", false, "If specified, the user will be created with the admin role (ignored if user-id is provided).") + + setConfigCmd.Flags().String("data", "", "Use data instead of reading config from stdin.") + setConfigCmd.Flags().Bool("allow-empty-data-encryption-key", false, "Explicitly allow an empty data-encryption-key when setting config.") + + monitorCmd.Flags().StringP("config-file", "f", "", "Configuration file for monitoring (required).") + RootCmd.AddCommand(versionCmd, migrateCmd, exportCmd, monitorCmd, switchCmd, addUserCmd, getConfigCmd, setConfigCmd) + + err := viper.BindPFlags(RootCmd.Flags()) + if err != nil { + panic(err) + } + err = viper.BindPFlags(monitorCmd.Flags()) + if err != nil { + panic(err) + } + err = viper.BindPFlags(migrateCmd.Flags()) + if err != nil { + panic(err) + } + err = viper.BindPFlags(exportCmd.Flags()) + if err != nil { + panic(err) + } + err = viper.BindPFlags(setConfigCmd.Flags()) + if err != nil { + panic(err) + } + err = viper.BindPFlags(getConfigCmd.Flags()) + if err != nil { + panic(err) + } + err = viper.BindPFlags(RootCmd.PersistentFlags()) + if err != nil { + panic(err) + } + + viper.SetEnvPrefix("GOALERT") + + // use underscores in env names + viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + + viper.AutomaticEnv() +} diff --git a/app/contextlocker.go b/app/contextlocker.go new file mode 100644 index 0000000000..d0298d2812 --- /dev/null +++ b/app/contextlocker.go @@ -0,0 +1,128 @@ +package app + +import ( + "context" + "sync/atomic" +) + +type contextLocker struct { + readCount int64 + + lock chan lockReq + unlock chan chan struct{} + + rLock chan struct{} 
+ rUnlock chan struct{} + rNotLocked chan struct{} +} +type lockReq struct { + cancel <-chan struct{} + ch chan bool +} + +func newContextLocker() *contextLocker { + c := &contextLocker{ + lock: make(chan lockReq), + unlock: make(chan chan struct{}, 1), + rLock: make(chan struct{}), + rUnlock: make(chan struct{}), + rNotLocked: make(chan struct{}), + } + go c.loop() + return c +} +func (c *contextLocker) writeLock(req lockReq) { + for atomic.LoadInt64(&c.readCount) > 0 { + select { + case <-c.rUnlock: + atomic.AddInt64(&c.readCount, -1) + case <-req.cancel: + req.ch <- false + return + } + } + + ch := make(chan struct{}) + c.unlock <- ch + req.ch <- true + for { + select { + case <-ch: + return + case <-c.rNotLocked: + } + } +} + +func (c *contextLocker) loop() { + for { + select { + // request for write lock always takes precedence + case req := <-c.lock: + c.writeLock(req) + continue + default: + } + + if atomic.LoadInt64(&c.readCount) == 0 { + select { + case req := <-c.lock: + c.writeLock(req) + case <-c.rLock: + atomic.AddInt64(&c.readCount, 1) + case <-c.rNotLocked: + } + continue + } + + select { + case req := <-c.lock: + c.writeLock(req) + case <-c.rLock: + atomic.AddInt64(&c.readCount, 1) + case <-c.rUnlock: + atomic.AddInt64(&c.readCount, -1) + } + } +} +func (c *contextLocker) RLockCount() int { + return int(atomic.LoadInt64(&c.readCount)) +} +func (c *contextLocker) Lock(ctx context.Context) error { + ch := make(chan bool) + select { + case <-ctx.Done(): + return ctx.Err() + case c.lock <- lockReq{cancel: ctx.Done(), ch: ch}: + } + + if <-ch { + return nil + } + + return ctx.Err() +} +func (c *contextLocker) Unlock() { + select { + case ch := <-c.unlock: + ch <- struct{}{} + default: + // safe to call, even if not write-locked (unlike RUnlock) + } +} +func (c *contextLocker) RLock(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case c.rLock <- struct{}{}: + } + + return nil +} +func (c *contextLocker) RUnlock() { + 
select { + case c.rUnlock <- struct{}{}: + case c.rNotLocked <- struct{}{}: + panic("not locked") + } +} diff --git a/app/cooldown.go b/app/cooldown.go new file mode 100644 index 0000000000..8441f8ac2f --- /dev/null +++ b/app/cooldown.go @@ -0,0 +1,64 @@ +package app + +import ( + "context" + "time" +) + +type cooldown struct { + dur time.Duration + trigCh chan struct{} + waitCh chan struct{} +} + +func newCooldown(dur time.Duration) *cooldown { + c := &cooldown{ + dur: dur, + trigCh: make(chan struct{}), + waitCh: make(chan struct{}), + } + go c.loop() + return c +} +func (c *cooldown) loop() { + t := time.NewTimer(c.dur) + var active bool + + for { + if active { + select { + case <-t.C: + active = false + case c.trigCh <- struct{}{}: + t.Stop() + t = time.NewTimer(c.dur) + + active = true + } + continue + } + + select { + // not active, allow closing + case c.waitCh <- struct{}{}: + case c.trigCh <- struct{}{}: + t.Stop() + t = time.NewTimer(c.dur) + active = true + } + } +} + +// WaitContext will wait until there have been no new connections within the cooldown period. 
+func (c *cooldown) WaitContext(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-c.waitCh: + return nil + } +} + +func (c *cooldown) Trigger() { + <-c.trigCh +} diff --git a/app/getsetconfig.go b/app/getsetconfig.go new file mode 100644 index 0000000000..c902c03bcd --- /dev/null +++ b/app/getsetconfig.go @@ -0,0 +1,73 @@ +package app + +import ( + "context" + "database/sql" + "os" + + "github.com/pkg/errors" + "github.com/spf13/viper" + "github.com/target/goalert/config" + "github.com/target/goalert/permission" + "github.com/target/goalert/util/log" +) + +func getSetConfig(setCfg bool, data []byte) error { + if viper.GetBool("verbose") { + log.EnableVerbose() + } + + err := viper.ReadInConfig() + // ignore file not found error + if _, ok := err.(viper.ConfigFileNotFoundError); err != nil && !ok { + return errors.Wrap(err, "read config") + } + + c, err := getConfig() + if err != nil { + return err + } + db, err := sql.Open("postgres", c.DBURL) + if err != nil { + return errors.Wrap(err, "connect to postgres") + } + defer db.Close() + ctx := context.Background() + ctx = permission.SystemContext(ctx, "SetConfig") + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return errors.Wrap(err, "start transaction") + } + defer tx.Rollback() + + s, err := config.NewStore(ctx, db, c.EncryptionKeys, "") + if err != nil { + return errors.Wrap(err, "init config store") + } + if setCfg { + id, err := s.SetConfigData(ctx, tx, data) + if err != nil { + return errors.Wrap(err, "save config") + } + + err = tx.Commit() + if err != nil { + return errors.Wrap(err, "commit changes") + } + log.Logf(ctx, "Saved config version %d", id) + return nil + } + + _, _, data, err = s.ConfigData(ctx, tx) + if err != nil { + return errors.Wrap(err, "read config") + } + + err = tx.Commit() + if err != nil { + return errors.Wrap(err, "commit") + } + + _, err = os.Stdout.Write(data) + return err +} diff --git a/app/healthcheck.go 
b/app/healthcheck.go new file mode 100644 index 0000000000..ba14c893ba --- /dev/null +++ b/app/healthcheck.go @@ -0,0 +1,44 @@ +package app + +import ( + "github.com/target/goalert/app/lifecycle" + "github.com/target/goalert/retry" + "github.com/target/goalert/util/errutil" + "net/http" + "time" + + "github.com/pkg/errors" +) + +func (app *App) healthCheck(w http.ResponseWriter, req *http.Request) { + if app.mgr.Status() == lifecycle.StatusShutdown { + http.Error(w, "server shutting down", http.StatusInternalServerError) + return + } + + ctx := req.Context() + err := retry.DoTemporaryError(func(_ int) error { + return app.db.PingContext(ctx) + }, + retry.Log(ctx), + retry.Limit(5), + retry.FibBackoff(100*time.Millisecond), + ) + + errutil.HTTPError(req.Context(), w, errors.Wrap(err, "engine cycle")) +} + +func (app *App) engineStatus(w http.ResponseWriter, req *http.Request) { + if app.mgr.Status() == lifecycle.StatusShutdown { + http.Error(w, "server shutting down", http.StatusInternalServerError) + return + } + + if app.cfg.APIOnly { + http.Error(w, "engine not running", http.StatusInternalServerError) + return + } + + err := app.engine.WaitNextCycle(req.Context()) + errutil.HTTPError(req.Context(), w, errors.Wrap(err, "engine cycle")) +} diff --git a/app/initauth.go b/app/initauth.go new file mode 100644 index 0000000000..848a874db8 --- /dev/null +++ b/app/initauth.go @@ -0,0 +1,53 @@ +package app + +import ( + "context" + + "github.com/pkg/errors" + "github.com/target/goalert/auth" + "github.com/target/goalert/auth/basic" + "github.com/target/goalert/auth/github" + "github.com/target/goalert/auth/oidc" +) + +func (app *App) initAuth(ctx context.Context) error { + + var err error + app.authHandler, err = auth.NewHandler(ctx, app.db, auth.HandlerConfig{ + UserStore: app.UserStore, + SessionKeyring: app.SessionKeyring, + IntKeyStore: app.IntegrationKeyStore, + }) + if err != nil { + return errors.Wrap(err, "init auth handler") + } + + 
cfg := oidc.Config{ + Keyring: app.OAuthKeyring, + NonceStore: app.NonceStore, + } + oidcProvider, err := oidc.NewProvider(ctx, cfg) + if err != nil { + return errors.Wrap(err, "init OIDC auth provider") + } + app.authHandler.AddIdentityProvider("oidc", oidcProvider) + + githubConfig := &github.Config{ + Keyring: app.OAuthKeyring, + NonceStore: app.NonceStore, + } + + githubProvider, err := github.NewProvider(ctx, githubConfig) + if err != nil { + return errors.Wrap(err, "init GitHub auth provider") + } + app.authHandler.AddIdentityProvider("github", githubProvider) + + basicProvider, err := basic.NewProvider(ctx, app.db) + if err != nil { + return errors.Wrap(err, "init basic auth provider") + } + app.authHandler.AddIdentityProvider("basic", basicProvider) + + return err +} diff --git a/app/initengine.go b/app/initengine.go new file mode 100644 index 0000000000..13d51f7810 --- /dev/null +++ b/app/initengine.go @@ -0,0 +1,57 @@ +package app + +import ( + "context" + "database/sql" + + "github.com/target/goalert/engine" + + "github.com/pkg/errors" +) + +func (app *App) initEngine(ctx context.Context) error { + + var regionIndex int + err := app.db.QueryRowContext(ctx, `SELECT id FROM region_ids WHERE name = $1`, app.cfg.RegionName).Scan(®ionIndex) + if err == sql.ErrNoRows { + // doesn't exist, try to create + err = app.db.QueryRowContext(ctx, ` + WITH inserted AS ( + INSERT INTO region_ids (name) VALUES ($1) + ON CONFLICT DO NOTHING + RETURNING id + ) + SELECT id FROM region_ids WHERE name = $1 + UNION + SELECT id FROM inserted + `, app.cfg.RegionName).Scan(®ionIndex) + } + if err != nil { + return errors.Wrap(err, "get region index") + } + + app.engine, err = engine.NewEngine(ctx, app.db, &engine.Config{ + AlertStore: app.AlertStore, + AlertlogStore: app.AlertlogStore, + ContactMethodStore: app.ContactMethodStore, + NotificationSender: app.notificationManager, + UserStore: app.UserStore, + NotificationStore: app.NotificationStore, + NCStore: app.NCStore, + + 
ConfigSource: app.ConfigStore, + + Keys: app.cfg.EncryptionKeys, + + MaxMessages: 50, + + DisableCycle: app.cfg.APIOnly, + }) + if err != nil { + return errors.Wrap(err, "init engine") + } + + app.notificationManager.RegisterReceiver(app.engine) + + return nil +} diff --git a/app/initgraphql.go b/app/initgraphql.go new file mode 100644 index 0000000000..00040f0a97 --- /dev/null +++ b/app/initgraphql.go @@ -0,0 +1,68 @@ +package app + +import ( + "context" + "github.com/target/goalert/graphql" + "github.com/target/goalert/graphql2/graphqlapp" + "github.com/target/goalert/schedule/shiftcalc" +) + +func (app *App) initGraphQL(ctx context.Context) error { + + shiftC := &shiftcalc.ShiftCalculator{ + RotStore: app.RotationStore, + RuleStore: app.ScheduleRuleStore, + SchedStore: app.ScheduleStore, + OStore: app.OverrideStore, + } + + app.graphql2 = &graphqlapp.App{ + DB: app.db, + UserStore: app.UserStore, + CMStore: app.ContactMethodStore, + NRStore: app.NotificationRuleStore, + NCStore: app.NCStore, + AlertStore: app.AlertStore, + ServiceStore: app.ServiceStore, + FavoriteStore: app.FavoriteStore, + PolicyStore: app.EscalationStore, + ScheduleStore: app.ScheduleStore, + RotationStore: app.RotationStore, + OnCallStore: app.OnCallStore, + TimeZoneStore: app.TimeZoneStore, + IntKeyStore: app.IntegrationKeyStore, + LabelStore: app.LabelStore, + RuleStore: app.ScheduleRuleStore, + OverrideStore: app.OverrideStore, + ConfigStore: app.ConfigStore, + NotificationStore: app.NotificationStore, + SlackStore: app.slackChan, + } + + var err error + app.graphql, err = graphql.NewHandler(ctx, graphql.Config{ + DB: app.db, + UserStore: app.UserStore, + AlertStore: app.AlertStore, + AlertLogStore: app.AlertlogStore, + CMStore: app.ContactMethodStore, + NRStore: app.NotificationRuleStore, + UserFavoriteStore: app.FavoriteStore, + ServiceStore: app.ServiceStore, + ScheduleStore: app.ScheduleStore, + RotationStore: app.RotationStore, + ShiftCalc: shiftC, + ScheduleRuleStore: 
app.ScheduleRuleStore, + EscalationStore: app.EscalationStore, + IntegrationKeyStore: app.IntegrationKeyStore, + LimitStore: app.LimitStore, + Resolver: app.Resolver, + NotificationStore: app.NotificationStore, + HeartbeatStore: app.HeartbeatStore, + OverrideStore: app.OverrideStore, + LabelStore: app.LabelStore, + OnCallStore: app.OnCallStore, + }) + + return err +} diff --git a/app/inithttp.go b/app/inithttp.go new file mode 100644 index 0000000000..10dd59e0db --- /dev/null +++ b/app/inithttp.go @@ -0,0 +1,220 @@ +package app + +import ( + "context" + "net/http" + "net/url" + "strings" + "time" + + "contrib.go.opencensus.io/exporter/stackdriver/propagation" + "github.com/target/goalert/config" + "github.com/target/goalert/genericapi" + "github.com/target/goalert/grafana" + "github.com/target/goalert/mailgun" + "github.com/target/goalert/notification/twilio" + "github.com/target/goalert/util/log" + "github.com/target/goalert/web" + "go.opencensus.io/plugin/ochttp" +) + +func (app *App) initHTTP(ctx context.Context) error { + + var traceMiddleware func(next http.Handler) http.Handler + if app.cfg.StackdriverProjectID != "" { + traceMiddleware = func(next http.Handler) http.Handler { + return &ochttp.Handler{ + IsPublicEndpoint: true, + Propagation: &propagation.HTTPFormat{}, + Handler: next, + } + } + } else { + traceMiddleware = func(next http.Handler) http.Handler { + return &ochttp.Handler{ + IsPublicEndpoint: true, + Handler: next, + } + } + } + + middleware := []func(http.Handler) http.Handler{ + traceMiddleware, + // add app config to request context + func(next http.Handler) http.Handler { return config.Handler(next, app.ConfigStore) }, + + // request cooldown tracking (for graceful shutdown) + func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if !strings.HasPrefix(req.URL.Path, "/health") { + app.cooldown.Trigger() + } + next.ServeHTTP(w, req) + }) + }, + + // redirect 
http to https if public URL is https + func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + fwdProto := req.Header.Get("x-forwarded-proto") + cfg := config.FromContext(req.Context()) + if strings.HasPrefix(cfg.General.PublicURL, "https:") && ((fwdProto == "http") || (fwdProto == "" && req.URL.Scheme == "http")) { + var u url.URL + u.Host = req.Host + u.Host = u.Hostname() // strip non-standard port if necessary + u.Scheme = "https" + u.Path = req.URL.Path + u.RawQuery = req.URL.RawQuery + http.Redirect(w, req, u.String(), http.StatusTemporaryRedirect) + return + } + next.ServeHTTP(w, req) + }) + }, + + // limit auth check counts (fail-safe for loops or DB access) + authCheckLimit(100), + + // request logging + logRequest(app.cfg.LogRequests), + + // max request time + timeout(2 * time.Minute), + + // remove public URL prefix + stripPrefixMiddleware(), + + // limit max request size + maxBodySizeMiddleware(app.cfg.MaxReqBodyBytes), + + // pause has to become before anything that uses the DB (like auth) + app.pauseHandler, + + // authenticate requests + app.authHandler.WrapHandler, + + // add auth info to request logs + logRequestAuth, + + wrapGzip, + } + + if app.cfg.Verbose { + middleware = append(middleware, func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + next.ServeHTTP(w, req.WithContext(log.EnableDebug(req.Context()))) + }) + }) + } + + mux := http.NewServeMux() + + generic := genericapi.NewHandler(genericapi.Config{ + AlertStore: app.AlertStore, + IntegrationKeyStore: app.IntegrationKeyStore, + HeartbeatStore: app.HeartbeatStore, + UserStore: app.UserStore, + }) + + mux.Handle("/api/graphql", app.graphql2.Handler()) + mux.Handle("/api/graphql/explore", app.graphql2.PlayHandler()) + + mux.HandleFunc("/api/v2/config", app.ConfigStore.ServeConfig) + + mux.HandleFunc("/api/v2/identity/providers", app.authHandler.ServeProviders) + 
mux.HandleFunc("/api/v2/identity/logout", app.authHandler.ServeLogout) + + basicAuth := app.authHandler.IdentityProviderHandler("basic") + mux.HandleFunc("/api/v2/identity/providers/basic", basicAuth) + + githubAuth := app.authHandler.IdentityProviderHandler("github") + mux.HandleFunc("/api/v2/identity/providers/github", githubAuth) + mux.HandleFunc("/api/v2/identity/providers/github/callback", githubAuth) + + oidcAuth := app.authHandler.IdentityProviderHandler("oidc") + mux.HandleFunc("/api/v2/identity/providers/oidc", oidcAuth) + mux.HandleFunc("/api/v2/identity/providers/oidc/callback", oidcAuth) + + mux.HandleFunc("/api/v2/mailgun/incoming", mailgun.IngressWebhooks(app.AlertStore, app.IntegrationKeyStore)) + mux.HandleFunc("/api/v2/grafana/incoming", grafana.GrafanaToEventsAPI(app.AlertStore, app.IntegrationKeyStore)) + + mux.HandleFunc("/api/v2/generic/incoming", generic.ServeCreateAlert) + mux.HandleFunc("/api/v2/heartbeat/", generic.ServeHeartbeatCheck) + mux.HandleFunc("/api/v2/user-avatar/", generic.ServeUserAvatar) + + mux.HandleFunc("/api/v2/twilio/message", app.twilioSMS.ServeMessage) + mux.HandleFunc("/api/v2/twilio/message/status", app.twilioSMS.ServeStatusCallback) + mux.HandleFunc("/api/v2/twilio/call", app.twilioVoice.ServeCall) + mux.HandleFunc("/api/v2/twilio/call/status", app.twilioVoice.ServeStatusCallback) + + // Legacy (v1) API mappings + mux.HandleFunc("/v1/graphql", app.graphql.ServeHTTP) + muxRewrite(mux, "/v1/graphql2", "/api/graphql") + muxRedirect(mux, "/v1/graphql2/explore", "/api/graphql/explore") + muxRewrite(mux, "/v1/config", "/api/v2/config") + muxRewrite(mux, "/v1/identity/providers", "/api/v2/identity/providers") + muxRewritePrefix(mux, "/v1/identity/providers/", "/api/v2/identity/providers/") + muxRewrite(mux, "/v1/identity/logout", "/api/v2/identity/logout") + + muxRewrite(mux, "/v1/webhooks/mailgun", "/api/v2/mailgun/incoming") + muxRewrite(mux, "/v1/webhooks/grafana", "/api/v2/grafana/incoming") + muxRewrite(mux, 
"/v1/api/alerts", "/api/v2/generic/incoming") + muxRewritePrefix(mux, "/v1/api/heartbeat/", "/api/v2/heartbeat/") + muxRewriteWith(mux, "/v1/api/users/", func(req *http.Request) *http.Request { + parts := strings.Split(strings.TrimSuffix(req.URL.Path, "/avatar"), "/") + req.URL.Path = "/api/v2/user-avatar/" + parts[len(parts)-1] + return req + }) + + muxRewrite(mux, "/v1/twilio/sms/messages", "/api/v2/twilio/message") + muxRewrite(mux, "/v1/twilio/sms/status", "/api/v2/twilio/message/status") + muxRewrite(mux, "/v1/twilio/voice/call", "/api/v2/twilio/call?type=alert") + muxRewrite(mux, "/v1/twilio/voice/alert-status", "/api/v2/twilio/call?type=alert-status") + muxRewrite(mux, "/v1/twilio/voice/test", "/api/v2/twilio/call?type=test") + muxRewrite(mux, "/v1/twilio/voice/stop", "/api/v2/twilio/call?type=stop") + muxRewrite(mux, "/v1/twilio/voice/verify", "/api/v2/twilio/call?type=verify") + muxRewrite(mux, "/v1/twilio/voice/status", "/api/v2/twilio/call/status") + + twilioHandler := twilio.WrapValidation( + // go back to the regular mux after validation + twilio.WrapHeaderHack(mux), + *app.twilioConfig, + ) + + topMux := http.NewServeMux() + + // twilio calls should go through the validation handler first + // since the signature is based on the original URL + topMux.Handle("/v1/twilio/", twilioHandler) + topMux.Handle("/api/v2/twilio/", twilioHandler) + + topMux.Handle("/v1/", mux) + topMux.Handle("/api/", mux) + + topMux.HandleFunc("/health", app.healthCheck) + topMux.HandleFunc("/health/engine", app.engineStatus) + + webH, err := web.NewHandler(app.cfg.UIURL) + if err != nil { + return err + } + // non-API/404s go to UI handler + topMux.Handle("/", webH) + + app.srv = &http.Server{ + Handler: applyMiddleware(topMux, middleware...), + + ReadHeaderTimeout: time.Second * 30, + ReadTimeout: time.Minute, + WriteTimeout: time.Minute, + IdleTimeout: time.Minute * 2, + MaxHeaderBytes: app.cfg.MaxReqHeaderBytes, + } + + // Ingress/load balancer/proxy can do keep-alives, 
backend doesn't need it. + // It also makes zero downtime deploys nearly impossible; an idle connection + // could have an in-flight request when the server closes it. + app.srv.SetKeepAlivesEnabled(false) + + return nil +} diff --git a/app/inithttputil.go b/app/inithttputil.go new file mode 100644 index 0000000000..3e259ef5c3 --- /dev/null +++ b/app/inithttputil.go @@ -0,0 +1,67 @@ +package app + +import ( + "net/http" + "net/url" + "strings" +) + +func applyMiddleware(h http.Handler, middleware ...func(http.Handler) http.Handler) http.Handler { + // Needs to be wrapped in reverse order + // so that the first one listed, is the "outermost" + // handler, thus preserving the expected run-order. + for i := len(middleware) - 1; i >= 0; i-- { + h = middleware[i](h) + } + return h +} + +func muxRedirect(mux *http.ServeMux, from, to string) { + mux.HandleFunc(from, func(w http.ResponseWriter, req *http.Request) { + http.Redirect(w, req, to, http.StatusTemporaryRedirect) + }) +} +func muxRedirectPrefix(mux *http.ServeMux, prefix, to string) { + mux.HandleFunc(prefix, func(w http.ResponseWriter, req *http.Request) { + http.Redirect(w, req, to+strings.TrimPrefix(req.URL.Path, prefix), http.StatusTemporaryRedirect) + }) +} +func muxRewriteWith(mux *http.ServeMux, from string, fn func(req *http.Request) *http.Request) { + mux.HandleFunc(from, + func(w http.ResponseWriter, req *http.Request) { + mux.ServeHTTP(w, fn(req)) + }) +} +func muxRewrite(mux *http.ServeMux, from, to string) { + u, err := url.Parse(to) + if err != nil { + panic(err) + } + uQ := u.Query() + + muxRewriteWith(mux, from, func(req *http.Request) *http.Request { + req.URL.Path = u.Path + q := req.URL.Query() + for key := range uQ { + q.Set(key, uQ.Get(key)) + } + req.URL.RawQuery = q.Encode() + return req + }) +} +func muxRewritePrefix(mux *http.ServeMux, prefix, to string) { + u, err := url.Parse(to) + if err != nil { + panic(err) + } + uQ := u.Query() + muxRewriteWith(mux, prefix, func(req *http.Request) 
*http.Request { + req.URL.Path = u.Path + strings.TrimPrefix(req.URL.Path, prefix) + q := req.URL.Query() + for key := range uQ { + q.Set(key, uQ.Get(key)) + } + req.URL.RawQuery = q.Encode() + return req + }) +} diff --git a/app/inithttputil_test.go b/app/inithttputil_test.go new file mode 100644 index 0000000000..cecda53dfb --- /dev/null +++ b/app/inithttputil_test.go @@ -0,0 +1,146 @@ +package app + +import ( + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMuxRedirect(t *testing.T) { + mux := http.NewServeMux() + + muxRedirect(mux, "/old/path", "/new/path") + srv := httptest.NewServer(mux) + defer srv.Close() + + req, err := http.NewRequest("GET", srv.URL+"/old/path", nil) + assert.Nil(t, err) + + resp, err := http.DefaultTransport.RoundTrip(req) + assert.Nil(t, err) + + assert.Equal(t, http.StatusTemporaryRedirect, resp.StatusCode, "Status Code") + loc, err := resp.Location() + assert.Nil(t, err) + + assert.Equal(t, srv.URL+"/new/path", loc.String(), "redirect URL") +} + +func TestMuxRedirectPrefix(t *testing.T) { + mux := http.NewServeMux() + + muxRedirectPrefix(mux, "/old/", "/new/") + srv := httptest.NewServer(mux) + defer srv.Close() + + req, err := http.NewRequest("GET", srv.URL+"/old/path", nil) + assert.Nil(t, err) + + resp, err := http.DefaultTransport.RoundTrip(req) + assert.Nil(t, err) + + assert.Equal(t, http.StatusTemporaryRedirect, resp.StatusCode, "Status Code") + loc, err := resp.Location() + assert.Nil(t, err) + + assert.Equal(t, srv.URL+"/new/path", loc.String(), "redirect URL") +} + +func TestMuxRewrite(t *testing.T) { + t.Run("simple rewrite", func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/new/path", func(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, req.URL.String()) + }) + muxRewrite(mux, "/old/path", "/new/path") + + srv := httptest.NewServer(mux) + defer srv.Close() + + req, err := http.NewRequest("GET", 
srv.URL+"/old/path", nil) + assert.Nil(t, err) + + resp, err := http.DefaultTransport.RoundTrip(req) + assert.Nil(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode, "Status Code") + data, err := ioutil.ReadAll(resp.Body) + assert.Nil(t, err) + + assert.Equal(t, "/new/path", string(data)) + }) + t.Run("query params", func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/new/path", func(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, req.URL.String()) + }) + muxRewrite(mux, "/old/path", "/new/path?a=b") + + srv := httptest.NewServer(mux) + defer srv.Close() + + req, err := http.NewRequest("GET", srv.URL+"/old/path?c=d", nil) + assert.Nil(t, err) + + resp, err := http.DefaultTransport.RoundTrip(req) + assert.Nil(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode, "Status Code") + data, err := ioutil.ReadAll(resp.Body) + assert.Nil(t, err) + + assert.Equal(t, "/new/path?a=b&c=d", string(data)) + }) +} + +func TestMuxRewritePrefix(t *testing.T) { + t.Run("simple prefix", func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/new/path", func(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, req.URL.String()) + }) + muxRewritePrefix(mux, "/old/", "/new/") + + srv := httptest.NewServer(mux) + defer srv.Close() + + req, err := http.NewRequest("GET", srv.URL+"/old/path", nil) + assert.Nil(t, err) + + resp, err := http.DefaultTransport.RoundTrip(req) + assert.Nil(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode, "Status Code") + data, err := ioutil.ReadAll(resp.Body) + assert.Nil(t, err) + + assert.Equal(t, "/new/path", string(data)) + }) + + t.Run("query params", func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/new/path", func(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, req.URL.String()) + }) + muxRewritePrefix(mux, "/old/", "/new/?c=d") + + srv := httptest.NewServer(mux) + defer srv.Close() + + req, err := http.NewRequest("GET", srv.URL+"/old/path?a=b", 
nil) + assert.Nil(t, err) + + resp, err := http.DefaultTransport.RoundTrip(req) + assert.Nil(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode, "Status Code") + data, err := ioutil.ReadAll(resp.Body) + assert.Nil(t, err) + + assert.Equal(t, "/new/path?a=b&c=d", string(data)) + }) +} diff --git a/app/initslack.go b/app/initslack.go new file mode 100644 index 0000000000..91128a8700 --- /dev/null +++ b/app/initslack.go @@ -0,0 +1,20 @@ +package app + +import ( + "context" + + "github.com/target/goalert/notification" + "github.com/target/goalert/notification/slack" +) + +func (app *App) initSlack(ctx context.Context) error { + var err error + app.slackChan, err = slack.NewChannelSender(ctx, slack.Config{ + BaseURL: app.cfg.SlackBaseURL, + }) + if err != nil { + return err + } + app.notificationManager.RegisterSender(notification.DestTypeSlackChannel, "Slack-Channel", app.slackChan) + return nil +} diff --git a/app/initstores.go b/app/initstores.go new file mode 100644 index 0000000000..db05712919 --- /dev/null +++ b/app/initstores.go @@ -0,0 +1,227 @@ +package app + +import ( + "context" + "net/url" + + "github.com/target/goalert/alert" + alertlog "github.com/target/goalert/alert/log" + "github.com/target/goalert/auth/nonce" + "github.com/target/goalert/config" + "github.com/target/goalert/engine/resolver" + "github.com/target/goalert/escalation" + "github.com/target/goalert/heartbeat" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/keyring" + "github.com/target/goalert/label" + "github.com/target/goalert/limit" + "github.com/target/goalert/notification" + "github.com/target/goalert/notification/slack" + "github.com/target/goalert/notificationchannel" + "github.com/target/goalert/oncall" + "github.com/target/goalert/override" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + 
"github.com/target/goalert/service" + "github.com/target/goalert/timezone" + "github.com/target/goalert/user" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/user/favorite" + "github.com/target/goalert/user/notificationrule" + + "github.com/pkg/errors" +) + +func (app *App) initStores(ctx context.Context) error { + var err error + + if app.ConfigStore == nil { + var fallback url.URL + fallback.Scheme = "http" + fallback.Host = app.l.Addr().String() + app.ConfigStore, err = config.NewStore(ctx, app.db, app.cfg.EncryptionKeys, fallback.String()) + } + if err != nil { + return errors.Wrap(err, "init config store") + } + + if app.NonceStore == nil { + app.NonceStore, err = nonce.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init nonce store") + } + + if app.OAuthKeyring == nil { + app.OAuthKeyring, err = keyring.NewDB(ctx, app.db, &keyring.Config{ + Name: "oauth-state", + RotationDays: 1, + MaxOldKeys: 1, + Keys: app.cfg.EncryptionKeys, + }) + } + if err != nil { + return errors.Wrap(err, "init oauth state keyring") + } + + if app.SessionKeyring == nil { + app.SessionKeyring, err = keyring.NewDB(ctx, app.db, &keyring.Config{ + Name: "browser-sessions", + RotationDays: 1, + MaxOldKeys: 30, + Keys: app.cfg.EncryptionKeys, + }) + } + if err != nil { + return errors.Wrap(err, "init session keyring") + } + + if app.AlertlogStore == nil { + app.AlertlogStore, err = alertlog.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init alertlog store") + } + + if app.AlertStore == nil { + app.AlertStore, err = alert.NewDB(ctx, app.db, app.AlertlogStore) + } + if err != nil { + return errors.Wrap(err, "init alert store") + } + + if app.ContactMethodStore == nil { + app.ContactMethodStore, err = contactmethod.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init contact method store") + } + + if app.NotificationRuleStore == nil { + app.NotificationRuleStore, err = 
notificationrule.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init notification rule store") + } + + if app.ServiceStore == nil { + app.ServiceStore, err = service.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init service store") + } + if app.ScheduleStore == nil { + app.ScheduleStore, err = schedule.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init schedule store") + } + if app.RotationStore == nil { + app.RotationStore, err = rotation.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init rotation store") + } + + if app.UserStore == nil { + app.UserStore, err = user.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init user store") + } + + if app.NCStore == nil { + app.NCStore, err = notificationchannel.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init notification channel store") + } + + if app.EscalationStore == nil { + app.EscalationStore, err = escalation.NewDB(ctx, app.db, escalation.Config{ + LogStore: app.AlertlogStore, + NCStore: app.NCStore, + SlackLookupFunc: func(ctx context.Context, channelID string) (*slack.Channel, error) { + return app.slackChan.Channel(ctx, channelID) + }, + }) + } + if err != nil { + return errors.Wrap(err, "init escalation policy store") + } + + if app.IntegrationKeyStore == nil { + app.IntegrationKeyStore, err = integrationkey.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init integration key store") + } + + if app.ScheduleRuleStore == nil { + app.ScheduleRuleStore, err = rule.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init schedule rule store") + } + + if app.NotificationStore == nil { + app.NotificationStore, err = notification.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init notification store") + } + + if app.FavoriteStore == nil { + app.FavoriteStore, err = favorite.NewDB(ctx, app.db) + } + if err != nil { + return 
errors.Wrap(err, "init favorite store") + } + + if app.OverrideStore == nil { + app.OverrideStore, err = override.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init override store") + } + + if app.Resolver == nil { + app.Resolver, err = resolver.NewDB(ctx, app.db, app.ScheduleRuleStore, app.ScheduleStore) + } + if err != nil { + return errors.Wrap(err, "init resolver") + } + + if app.LimitStore == nil { + app.LimitStore, err = limit.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init limit config store") + } + if app.HeartbeatStore == nil { + app.HeartbeatStore, err = heartbeat.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init heartbeat store") + } + if app.LabelStore == nil { + app.LabelStore, err = label.NewDB(ctx, app.db) + } + if err != nil { + return errors.Wrap(err, "init label store") + } + + if app.OnCallStore == nil { + app.OnCallStore, err = oncall.NewDB(ctx, app.db, app.ScheduleRuleStore) + } + if err != nil { + return errors.Wrap(err, "init on-call store") + } + + if app.TimeZoneStore == nil { + app.TimeZoneStore = timezone.NewStore(ctx, app.db) + } + + return nil +} diff --git a/app/inittwilio.go b/app/inittwilio.go new file mode 100644 index 0000000000..549888f84f --- /dev/null +++ b/app/inittwilio.go @@ -0,0 +1,34 @@ +package app + +import ( + "context" + "net/http" + + "github.com/target/goalert/notification" + "github.com/target/goalert/notification/twilio" + + "github.com/pkg/errors" + "go.opencensus.io/plugin/ochttp" +) + +func (app *App) initTwilio(ctx context.Context) error { + app.twilioConfig = &twilio.Config{ + APIURL: app.cfg.TwilioBaseURL, + Client: &http.Client{Transport: &ochttp.Transport{}}, + } + + var err error + app.twilioSMS, err = twilio.NewSMS(ctx, app.db, app.twilioConfig) + if err != nil { + return errors.Wrap(err, "init TwilioSMS") + } + app.notificationManager.RegisterSender(notification.DestTypeSMS, "Twilio-SMS", app.twilioSMS) + + app.twilioVoice, err 
= twilio.NewVoice(ctx, app.db, app.twilioConfig) + if err != nil { + return errors.Wrap(err, "init TwilioVoice") + } + app.notificationManager.RegisterSender(notification.DestTypeVoice, "Twilio-Voice", app.twilioVoice) + + return nil +} diff --git a/app/lifecycle/manager.go b/app/lifecycle/manager.go new file mode 100644 index 0000000000..113d9ba693 --- /dev/null +++ b/app/lifecycle/manager.go @@ -0,0 +1,402 @@ +package lifecycle + +import ( + "context" + + "github.com/pkg/errors" +) + +// Status represents lifecycle state. +type Status int + +// Possible states. +const ( + StatusUnknown Status = iota + StatusStarting + StatusReady + StatusShutdown + StatusPausing + StatusPaused +) + +// Static errors +var ( + ErrAlreadyStarted = errors.New("already started") + ErrShutdown = errors.New("shutting down") + ErrNotStarted = errors.New("not started") + ErrPauseUnsupported = errors.New("pause not supported or unset") +) + +// Manager is used to wrap lifecycle methods with strong guarantees. +type Manager struct { + startupFunc func(context.Context) error + runFunc func(context.Context) error + shutdownFunc func(context.Context) error + pauseResume PauseResumer + + status chan Status + + startupCancel func() + startupDone chan struct{} + + runCancel func() + runDone chan struct{} + + shutdownCancel func() + shutdownDone chan struct{} + shutdownErr error + + pauseCancel func() + pauseDone chan struct{} + pauseStart chan struct{} + pauseErr error + isPausing bool +} + +var _ Pausable = &Manager{} +var _ PauseResumer = &Manager{} + +// NewManager will construct a new manager wrapping the provided +// run and shutdown funcs. 
+func NewManager(run, shutdown func(context.Context) error) *Manager { + mgr := &Manager{ + runFunc: run, + shutdownFunc: shutdown, + + runDone: make(chan struct{}), + startupDone: make(chan struct{}), + shutdownDone: make(chan struct{}), + pauseStart: make(chan struct{}), + status: make(chan Status, 1), + } + mgr.status <- StatusUnknown + return mgr +} + +// SetStartupFunc can be used to optionally specify a startup function that +// will be called before calling run. +func (m *Manager) SetStartupFunc(fn func(context.Context) error) error { + s := <-m.status + switch s { + case StatusShutdown: + m.status <- s + return ErrShutdown + case StatusUnknown: + m.startupFunc = fn + m.status <- s + return nil + default: + m.status <- s + return ErrAlreadyStarted + } +} + +// SetPauseResumer will set the PauseResumer used by Pause and Resume methods. +func (m *Manager) SetPauseResumer(pr PauseResumer) error { + s := <-m.status + if m.isPausing || s == StatusPausing || s == StatusPaused { + m.status <- s + return errors.New("cannot SetPauseResumer during pause operation") + } + m.pauseResume = pr + m.status <- s + return nil +} + +// IsPausing will return true if the manager is in a state of +// pause, or is currently fulfilling a Pause request. +func (m *Manager) IsPausing() bool { + s := <-m.status + isPausing := m.isPausing + m.status <- s + switch s { + case StatusPausing, StatusPaused: + return true + case StatusShutdown: + return true + } + return isPausing +} + +// PauseWait will return a channel that blocks until a pause operation begins. +func (m *Manager) PauseWait() <-chan struct{} { + s := <-m.status + ch := m.pauseStart + m.status <- s + return ch +} + +// WaitForStartup will wait for startup to complete (even if failed or shutdown). +// err is nil unless context deadline is reached. 
+func (m *Manager) WaitForStartup(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-m.startupDone: + return nil + } +} + +// Status returns the current status. +func (m *Manager) Status() Status { + s := <-m.status + m.status <- s + return s +} + +// Run starts the main loop. +func (m *Manager) Run(ctx context.Context) error { + s := <-m.status + switch s { + case StatusShutdown: + m.status <- s + return ErrShutdown + case StatusUnknown: + // ok + default: + m.status <- s + return ErrAlreadyStarted + } + + startCtx, cancel := context.WithCancel(ctx) + defer cancel() + m.startupCancel = cancel + startupFunc := m.startupFunc + m.status <- StatusStarting + + var err error + if startupFunc != nil { + err = startupFunc(startCtx) + } + cancel() + + s = <-m.status + + switch s { + case StatusShutdown: + m.status <- s + // no error on shutdown while starting + return nil + case StatusStarting: + if err != nil { + m.status <- s + close(m.startupDone) + return err + } + // ok + default: + m.status <- s + panic("unexpected lifecycle state") + } + + ctx, m.runCancel = context.WithCancel(ctx) + close(m.startupDone) + m.status <- StatusReady + + err = m.runFunc(ctx) + close(m.runDone) + <-m.shutdownDone + return err +} + +// Shutdown begins the shutdown procedure. 
// Shutdown is safe to call concurrently; a second caller blocks until the
// first shutdown completes (or until ctx expires, which cancels the
// in-flight shutdown's context).
func (m *Manager) Shutdown(ctx context.Context) error {
	// initShutdown transitions to StatusShutdown, binding shutdownCancel to
	// a child of the caller's context. It must be called while holding the
	// status token (i.e. after receiving from m.status).
	initShutdown := func() {
		ctx, m.shutdownCancel = context.WithCancel(ctx)
		m.status <- StatusShutdown
	}

	// isRunning: runFunc is active and must be canceled after shutdownFunc.
	var isRunning bool
	s := <-m.status
	switch s {
	case StatusShutdown:
		// A shutdown is already in progress; wait for it instead of
		// starting another.
		m.status <- s
		select {
		case <-m.shutdownDone:
		case <-ctx.Done():
			// if we time out before the existing call, cancel its context
			m.shutdownCancel()
			<-m.shutdownDone
		}
		// NOTE(review): m.shutdownErr is never assigned in the visible
		// code, so this appears to always return nil — confirm it is set
		// elsewhere or intended.
		return m.shutdownErr
	case StatusStarting:
		// Abort startup, then wait for Run to acknowledge before
		// continuing with shutdown.
		m.startupCancel()
		close(m.pauseStart)
		initShutdown()
		<-m.startupDone
	case StatusUnknown:
		// Never started: mark shut down, nothing to stop.
		initShutdown()
		close(m.pauseStart)
		close(m.shutdownDone)
		return nil
	case StatusPausing:
		// Cancel the in-flight pause and wait for it to unwind.
		isRunning = true
		m.pauseCancel()
		initShutdown()
		<-m.pauseDone
	case StatusReady:
		// No pause ever began; unblock PauseWait watchers.
		close(m.pauseStart)
		fallthrough
	case StatusPaused:
		isRunning = true
		initShutdown()
	}

	defer close(m.shutdownDone)
	defer m.shutdownCancel()

	err := m.shutdownFunc(ctx)

	// Cancel the main run loop only after shutdownFunc returns, so it can
	// drain gracefully first.
	if isRunning {
		m.runCancel()
		<-m.runDone
	}

	return err
}

// Pause will begin a pause operation.
// SetPauseResumer must have been called or ErrPauseUnsupported is returned.
//
// Pause is atomic and guarantees a paused state if nil is returned
// or normal operation otherwise.
func (m *Manager) Pause(ctx context.Context) error {
	// Acquire the status token; it must be sent back on every return path.
	s := <-m.status
	if m.pauseResume == nil {
		m.status <- s
		return ErrPauseUnsupported
	}
	switch s {
	case StatusShutdown:
		m.status <- s
		return ErrShutdown
	case StatusPaused:
		// Already paused; nothing to do.
		m.status <- s
		return nil
	case StatusPausing:
		// Another Pause is in flight: wait for it to finish, then retry so
		// the caller gets the result of a complete operation.
		pauseDone := m.pauseDone
		m.status <- s
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-pauseDone:
			return m.Pause(ctx)
		}
	case StatusStarting, StatusUnknown:
		if m.isPausing {
			// A pause was already requested before startup finished;
			// wait for it and retry.
			pauseDone := m.pauseDone
			m.status <- s
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-pauseDone:
				return m.Pause(ctx)
			}
		}
		// Not started and no pause pending: fall out of the switch still
		// holding the token; startup is awaited below.
	case StatusReady:
		// ok
	}

	// Begin a new pause operation (status token still held here).
	ctx, m.pauseCancel = context.WithCancel(ctx)
	m.pauseDone = make(chan struct{})
	m.isPausing = true
	defer close(m.pauseDone)
	defer m.pauseCancel()
	m.pauseErr = nil
	if s != StatusReady {
		// Started from Unknown/Starting: release the token and wait for
		// startup to complete before actually pausing.
		m.status <- s
		select {
		case <-ctx.Done():
			s = <-m.status
			m.isPausing = false
			m.status <- s
			return ctx.Err()
		case <-m.startupDone:
		}

		s = <-m.status
		switch s {
		case StatusShutdown:
			m.status <- s
			return ErrShutdown
		case StatusReady:
			// ok
		default:
			m.status <- s
			panic("unexpected lifecycle state")
		}
	}

	// Signal PauseWait watchers that a pause has begun.
	close(m.pauseStart)
	m.status <- StatusPausing
	err := m.pauseResume.Pause(ctx)
	m.pauseCancel()
	s = <-m.status
	switch s {
	case StatusShutdown:
		// Shutdown raced with the pause; report shutdown instead.
		m.pauseErr = ErrShutdown
		m.isPausing = false
		m.status <- s
		return ErrShutdown
	case StatusPausing:
		// ok
	default:
		m.isPausing = false
		m.status <- s
		panic("unexpected lifecycle state")
	}

	if err != nil {
		// Pause failed: restore normal operation and re-arm PauseWait.
		m.pauseErr = err
		m.isPausing = false
		m.pauseStart = make(chan struct{})
		m.status <- StatusReady
		return err
	}

	m.pauseErr = nil
	m.isPausing = false
	m.status <- StatusPaused
	return nil
}

// Resume will always result in normal operation (unless Shutdown was called).
+// +// If the context deadline is reached, "graceful" operations may fail, but +// will always result in a Ready state. +func (m *Manager) Resume(ctx context.Context) error { + s := <-m.status + if m.pauseResume == nil { + m.status <- s + return ErrPauseUnsupported + } + switch s { + case StatusShutdown: + m.status <- s + return ErrShutdown + case StatusUnknown, StatusStarting: + if !m.isPausing { + m.status <- s + return nil + } + + fallthrough + case StatusPausing: + m.pauseCancel() + pauseDone := m.pauseDone + m.status <- s + <-pauseDone + return m.Resume(ctx) + case StatusPaused: + // ok + case StatusReady: + m.status <- s + return nil + } + + m.pauseStart = make(chan struct{}) + err := m.pauseResume.Resume(ctx) + m.status <- StatusReady + + return err +} diff --git a/app/lifecycle/manager_test.go b/app/lifecycle/manager_test.go new file mode 100644 index 0000000000..5d760da012 --- /dev/null +++ b/app/lifecycle/manager_test.go @@ -0,0 +1,196 @@ +package lifecycle + +import ( + "context" + "testing" + "time" +) + +func TestManager_PauseingShutdown(t *testing.T) { + + _, pr := buildPause() + ran := make(chan struct{}) + run := func(ctx context.Context) error { <-ctx.Done(); close(ran); return ctx.Err() } + shut := func(ctx context.Context) error { return nil } + mgr := NewManager(run, shut) + mgr.SetPauseResumer(pr) + + go mgr.Run(context.Background()) + + var err error + errCh := make(chan error) + pauseErr := make(chan error) + + tc := time.NewTimer(time.Second) + defer tc.Stop() + + go func() { pauseErr <- mgr.Pause(context.Background()) }() + tc.Reset(time.Second) + select { + case <-mgr.PauseWait(): + case <-tc.C: + t.Fatal("pause didn't start") + } + // done(nil) + + go func() { errCh <- mgr.Shutdown(context.Background()) }() + + tc.Reset(time.Second) + select { + case <-tc.C: + t.Fatal("shutdown never finished") + case err = <-errCh: + } + if err != nil { + t.Fatalf("shutdown error: got %v; want nil", err) + } + + tc.Reset(time.Second) + select { + case 
<-tc.C: + t.Fatal("run never got canceled") + case <-ran: + } + + tc.Reset(time.Second) + select { + case <-tc.C: + t.Fatal("pause never finished") + case <-pauseErr: + } + +} + +func TestManager_PauseShutdown(t *testing.T) { + done, pr := buildPause() + ran := make(chan struct{}) + run := func(ctx context.Context) error { <-ctx.Done(); close(ran); return ctx.Err() } + shut := func(ctx context.Context) error { return nil } + mgr := NewManager(run, shut) + mgr.SetPauseResumer(pr) + + go mgr.Run(context.Background()) + + var err error + errCh := make(chan error) + go func() { errCh <- mgr.Pause(context.Background()) }() + done(nil) + + tc := time.NewTimer(time.Second) + defer tc.Stop() + select { + case <-tc.C: + t.Fatal("pause never finished") + case err = <-errCh: + } + if err != nil { + t.Fatalf("got %v; want nil", err) + } + + go func() { errCh <- mgr.Shutdown(context.Background()) }() + + tc.Reset(time.Second) + select { + case <-tc.C: + t.Fatal("shutdown never finished") + case err = <-errCh: + } + if err != nil { + t.Fatalf("shutdown error: got %v; want nil", err) + } + + tc.Reset(time.Second) + select { + case <-tc.C: + t.Fatal("run never got canceled") + case <-ran: + } + +} + +func TestManager_PauseResume(t *testing.T) { + done, pr := buildPause() + run := func(ctx context.Context) error { <-ctx.Done(); return ctx.Err() } + shut := func(ctx context.Context) error { return nil } + mgr := NewManager(run, shut) + mgr.SetPauseResumer(pr) + + go mgr.Run(context.Background()) + + var err error + errCh := make(chan error) + go func() { errCh <- mgr.Pause(context.Background()) }() + done(nil) + + tc := time.NewTimer(time.Second) + defer tc.Stop() + select { + case <-tc.C: + t.Fatal("pause never finished") + case err = <-errCh: + } + if err != nil { + t.Fatalf("got %v; want nil", err) + } + + go func() { errCh <- mgr.Resume(context.Background()) }() + + tc.Reset(time.Second) + select { + case <-tc.C: + t.Fatal("resume never finished") + case err = <-errCh: + } + if 
err != nil { + t.Fatalf("resume error: got %v; want nil", err) + } + +} + +func TestManager_PauseingResume(t *testing.T) { + + _, pr := buildPause() + ran := make(chan struct{}) + run := func(ctx context.Context) error { <-ctx.Done(); close(ran); return ctx.Err() } + shut := func(ctx context.Context) error { return nil } + mgr := NewManager(run, shut) + mgr.SetPauseResumer(pr) + + go mgr.Run(context.Background()) + + var err error + errCh := make(chan error) + pauseErr := make(chan error) + + tc := time.NewTimer(time.Second) + defer tc.Stop() + + go func() { pauseErr <- mgr.Pause(context.Background()) }() + tc.Reset(time.Second) + select { + case <-mgr.PauseWait(): + case <-tc.C: + t.Fatal("pause didn't start") + } + // done(nil) + + go func() { errCh <- mgr.Resume(context.Background()) }() + + tc.Reset(time.Second) + select { + case <-tc.C: + t.Fatal("resume never finished") + case err = <-errCh: + } + if err != nil { + t.Fatalf("resume error: got %v; want nil", err) + } + + tc.Reset(time.Second) + select { + case <-tc.C: + t.Fatal("pause never finished") + case <-pauseErr: + } + +} diff --git a/app/lifecycle/pauseable.go b/app/lifecycle/pauseable.go new file mode 100644 index 0000000000..27ec37731e --- /dev/null +++ b/app/lifecycle/pauseable.go @@ -0,0 +1,15 @@ +package lifecycle + +// Pausable is able to indicate if a pause operation is on-going. +// +// It is used in cases to initiate a graceful/safe abort of long-running operations +// when IsPausing returns true. +type Pausable interface { + IsPausing() bool + + // PauseWait will block until a pause operation begins. + // + // It should only be used once, it will not block again + // once resume is called. 
+ PauseWait() <-chan struct{} +} diff --git a/app/lifecycle/pauseresumer.go b/app/lifecycle/pauseresumer.go new file mode 100644 index 0000000000..116c9618ef --- /dev/null +++ b/app/lifecycle/pauseresumer.go @@ -0,0 +1,123 @@ +package lifecycle + +import ( + "context" + + "github.com/pkg/errors" +) + +// A PauseResumer can be atomically paused and resumed. +type PauseResumer interface { + // Pause should result in pausing all operations if nil is returned. + // + // If a pause cannot complete within the context deadline, + // the context error should be returned, and normal operation should + // resume, as if pause was never called. + Pause(context.Context) error + + // Resume should always result in normal operation. + // + // Context can be used for control of graceful operations, + // but Resume should not return until normal operation is restored. + // + // Operations that are required for resuming, should use a background context + // internally (possibly linking any trace spans). + Resume(context.Context) error +} + +type prFunc struct{ pause, resume func(context.Context) error } + +func (p prFunc) Pause(ctx context.Context) error { return p.pause(ctx) } +func (p prFunc) Resume(ctx context.Context) error { return p.resume(ctx) } + +var _ PauseResumer = prFunc{} + +// PauseResumerFunc is a convenience method that takes a pause and resume func +// and returns a PauseResumer. +func PauseResumerFunc(pause, resume func(context.Context) error) PauseResumer { + return prFunc{pause: pause, resume: resume} +} + +// MultiPauseResume will join multiple PauseResumers where +// all will be paused, or none. +// +// Any that pause successfully, when another fails, will +// have Resume called. 
+func MultiPauseResume(pr ...PauseResumer) PauseResumer { + pause := func(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + pass := make(chan struct{}) + fail := make(chan struct{}) + errCh := make(chan error, len(pr)) + resumeErrCh := make(chan error, len(pr)) + + doPause := func(p PauseResumer) { + err := errors.Wrapf(p.Pause(ctx), "pause") + errCh <- err + select { + case <-pass: + resumeErrCh <- nil + case <-fail: + if err == nil { + resumeErrCh <- errors.Wrapf(p.Resume(ctx), "resume") + } else { + resumeErrCh <- nil + } + } + } + + for _, p := range pr { + go doPause(p) + } + + var hasErr bool + var errs []error + for range pr { + err := <-errCh + if err != nil { + errs = append(errs, err) + if !hasErr { + cancel() + close(fail) + hasErr = true + } + } + } + if !hasErr { + close(pass) + } + for range pr { + err := <-resumeErrCh + if err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return errors.Errorf("multiple errors: %v", errs) + } + + return nil + } + resume := func(ctx context.Context) error { + ch := make(chan error) + res := func(fn func(context.Context) error) { ch <- fn(ctx) } + for _, p := range pr { + go res(p.Resume) + } + var errs []error + for range pr { + err := <-ch + if err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return errors.Errorf("multiple errors: %v", errs) + } + return nil + } + + return PauseResumerFunc(pause, resume) +} diff --git a/app/lifecycle/pauseresumer_test.go b/app/lifecycle/pauseresumer_test.go new file mode 100644 index 0000000000..a5278a975e --- /dev/null +++ b/app/lifecycle/pauseresumer_test.go @@ -0,0 +1,120 @@ +package lifecycle + +import ( + "context" + "testing" + "time" + + "github.com/pkg/errors" +) + +func buildPause() (func(error), PauseResumer) { + ch := make(chan error) + + return func(err error) { + ch <- err + }, + PauseResumerFunc( + func(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + 
case err := <-ch: + return err + } + }, + func(ctx context.Context) error { + return nil + }, + ) +} + +func TestMultiPauseResume(t *testing.T) { + t.Run("simple success", func(t *testing.T) { + to := time.NewTimer(time.Second) + defer to.Stop() + done1, pr1 := buildPause() + done2, pr2 := buildPause() + ctx := context.Background() + errCh := make(chan error) + go func() { errCh <- MultiPauseResume(pr1, pr2).Pause(ctx) }() + + done1(nil) + done2(nil) + + select { + case err := <-errCh: + if err != nil { + t.Errorf("got %v; want nil", err) + } + case <-to.C: + t.Fatal("never returned") + } + + }) + t.Run("external cancellation", func(t *testing.T) { + to := time.NewTimer(time.Second) + defer to.Stop() + + _, pr1 := buildPause() + _, pr2 := buildPause() + ctx, cancel := context.WithCancel(context.Background()) + errCh := make(chan error) + go func() { errCh <- MultiPauseResume(pr1, pr2).Pause(ctx) }() + + cancel() + + select { + case err := <-errCh: + if err == nil { + t.Error("got nil; want err") + } + case <-to.C: + t.Fatal("never returned") + } + }) + t.Run("external cancellation", func(t *testing.T) { + to := time.NewTimer(time.Second) + defer to.Stop() + + done1, pr1 := buildPause() + _, pr2 := buildPause() + ctx, cancel := context.WithCancel(context.Background()) + errCh := make(chan error) + go func() { errCh <- MultiPauseResume(pr1, pr2).Pause(ctx) }() + + done1(nil) + cancel() + + select { + case err := <-errCh: + if err == nil { + t.Error("got nil; want err") + } + case <-to.C: + t.Fatal("never returned") + } + }) + t.Run("external cancellation", func(t *testing.T) { + to := time.NewTimer(time.Second) + defer to.Stop() + + done1, pr1 := buildPause() + _, pr2 := buildPause() + ctx, cancel := context.WithCancel(context.Background()) + errCh := make(chan error) + go func() { errCh <- MultiPauseResume(pr1, pr2).Pause(ctx) }() + + done1(errors.New("okay")) + cancel() + + select { + case err := <-errCh: + if err == nil { + t.Error("got nil; want err") + } + case 
<-to.C: + t.Fatal("never returned") + } + }) +} diff --git a/app/listenevents.go b/app/listenevents.go new file mode 100644 index 0000000000..8f38df374b --- /dev/null +++ b/app/listenevents.go @@ -0,0 +1,78 @@ +package app + +import ( + "context" + "time" + + "github.com/lib/pq" + "github.com/target/goalert/permission" + "github.com/target/goalert/util/log" +) + +func (app *App) listenEvents(ctx context.Context) error { + channels := []string{"/goalert/config-refresh"} + + handle := func(l *pq.Listener) { + defer l.Close() + + for { + var n *pq.Notification + select { + case n = <-l.NotificationChannel(): + case <-ctx.Done(): + return + } + + log.Debugf(log.WithFields(ctx, log.Fields{ + "Channel": n.Channel, + "PID": n.BePid, + "Extra": n.Extra, + }), "NOTIFY") + + switch n.Channel { + case "/goalert/config-refresh": + permission.SudoContext(ctx, func(ctx context.Context) { + log.Log(ctx, app.ConfigStore.Reload(ctx)) + }) + } + } + } + + makeListener := func(url string) (*pq.Listener, error) { + l := pq.NewListener(app.cfg.DBURL, 3*time.Second, time.Minute, nil) + for _, ch := range channels { + err := l.Listen(ch) + if err != nil { + l.Close() + return nil, err + } + } + err := l.Ping() + if err != nil { + l.Close() + return nil, err + } + + return l, nil + } + + l, err := makeListener(app.cfg.DBURL) + if err != nil { + return err + } + var ln *pq.Listener + if app.cfg.DBURLNext != "" { + ln, err = makeListener(app.cfg.DBURLNext) + if err != nil { + l.Close() + return err + } + } + + go handle(l) + if ln != nil { + go handle(ln) + } + + return nil +} diff --git a/app/listenstatus.go b/app/listenstatus.go new file mode 100644 index 0000000000..f8e8db14bf --- /dev/null +++ b/app/listenstatus.go @@ -0,0 +1,51 @@ +package app + +import ( + "net" + + "github.com/pkg/errors" +) + +func listenStatus(addr string, done <-chan struct{}) error { + if addr == "" { + return nil + } + + l, err := net.Listen("tcp", addr) + if err != nil { + return errors.Wrap(err, 
"start status listener") + } + ch := make(chan net.Conn) + + go func() { + defer close(ch) + for { + c, err := l.Accept() + if err != nil { + return + } + ch <- c + } + }() + go func() { + var conn []net.Conn + loop: + for { + select { + case <-done: + l.Close() + break loop + case c := <-ch: + conn = append(conn, c) + } + } + for c := range ch { + c.Close() + } + for _, c := range conn { + c.Close() + } + }() + + return nil +} diff --git a/app/logexporter.go b/app/logexporter.go new file mode 100644 index 0000000000..9359b09f3d --- /dev/null +++ b/app/logexporter.go @@ -0,0 +1,20 @@ +package app + +import ( + "context" + "github.com/target/goalert/util/log" + + "go.opencensus.io/trace" +) + +type logExporter struct{} + +func (l *logExporter) ExportSpan(span *trace.SpanData) { + if !span.IsSampled() { + return + } + ctx := log.WithField(context.Background(), "RequestID", span.TraceID.String()) + for _, a := range span.Annotations { + log.Logf(log.WithFields(ctx, log.Fields(a.Attributes)), a.Message) + } +} diff --git a/app/middleware.go b/app/middleware.go new file mode 100644 index 0000000000..a890156710 --- /dev/null +++ b/app/middleware.go @@ -0,0 +1,169 @@ +package app + +import ( + "context" + "io" + "net/http" + "strings" + "time" + + "github.com/felixge/httpsnoop" + "github.com/pkg/errors" + "github.com/target/goalert/config" + "github.com/target/goalert/graphql" + "github.com/target/goalert/permission" + "github.com/target/goalert/util/log" +) + +type _reqInfoCtxKey string + +const reqInfoCtxKey = _reqInfoCtxKey("request-info-fields") + +func stripPrefixMiddleware() func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cfg := config.FromContext(r.Context()) + prefix := strings.TrimSuffix(cfg.General.PublicURL, "/") + r.URL.Path = strings.TrimPrefix(r.URL.Path, prefix) + next.ServeHTTP(w, r) + }) + } +} +func 
maxBodySizeMiddleware(size int64) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + if size == 0 { + return next + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.Body = http.MaxBytesReader(w, r.Body, size) + next.ServeHTTP(w, r) + }) + } +} + +type readLogger struct { + io.ReadCloser + n int +} + +func (r *readLogger) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + r.n += n + return n, err +} + +func logRequestAuth(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + extraFields := req.Context().Value(reqInfoCtxKey).(*log.Fields) + *extraFields = log.ContextFields(req.Context()) + next.ServeHTTP(w, req) + }) +} + +func logRequest(alwaysLog bool) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + ctx = log.SetRequestID(ctx) + ctx = log.WithFields(ctx, log.Fields{ + "http_method": req.Method, + "http_proto": req.Proto, + "remote_addr": req.RemoteAddr, + "host": req.Host, + "uri": req.URL.Path, + "referer": req.Referer(), + "x_forwarded_for": req.Header.Get("x-forwarded-for"), + "x_forwarded_host": req.Header.Get("x-forwarded-host"), + }) + + // We need to include a struct in the context, that can be modified within child context. + // + // This is not really a proper use of context, however we have no good post-request-handler + // hook we can use, therefore we use a defer call to log. Since said defer is called with + // the context BEFORE we get to graphql, it can only reference values created before. + // + // This will do until we take a new approach to request logging that doesn't have the same issues. 
+ ctx = context.WithValue(ctx, graphql.RequestInfoContextKey, &graphql.RequestInfo{}) + + // Logging auth info in request + ctx = context.WithValue(ctx, reqInfoCtxKey, &log.Fields{}) + + rLog := &readLogger{ReadCloser: req.Body} + req.Body = rLog + + var serveError interface{} + metrics := httpsnoop.CaptureMetricsFn(w, func(w http.ResponseWriter) { + defer func() { + serveError = recover() + }() + next.ServeHTTP(w, req.WithContext(ctx)) + }) + + if serveError != nil && metrics.Written == 0 { + http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) + metrics.Code = 500 + } + + checks, _ := permission.AuthCheckCount(ctx) + + extraFields := ctx.Value(reqInfoCtxKey).(*log.Fields) + ctx = log.WithFields(ctx, *extraFields) + status := metrics.Code + if status == 0 { + status = 200 + } + ctx = log.WithFields(ctx, log.Fields{ + "resp_bytes_length": metrics.Written, + "req_bytes_length": rLog.n, + "resp_elapsed_ms": metrics.Duration.Seconds() * 1000, + "resp_status": status, + "AuthCheckCount": checks, + }) + + // If we have request info, and non-empty queries/mutations lists, append them to the log context. 
+ if info, ok := ctx.Value(graphql.RequestInfoContextKey).(*graphql.RequestInfo); ok && info != nil { + if len(info.Queries) > 0 { + ctx = log.WithField(ctx, "GraphQLQueries", strings.Join(info.Queries, ",")) + } + if len(info.Mutations) > 0 { + ctx = log.WithField(ctx, "GraphQLMutations", strings.Join(info.Mutations, ",")) + } + } + + if serveError != nil { + switch e := serveError.(type) { + case error: + log.Log(ctx, errors.Wrap(e, "request panic")) + default: + log.Log(ctx, errors.Errorf("request panic: %v", e)) + } + return + } + if alwaysLog && req.URL.Path != "/health" { + log.Logf(ctx, "request complete") + } else { + log.Debugf(ctx, "request complete") + } + }) + } +} + +func authCheckLimit(max int) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + next.ServeHTTP(w, req.WithContext( + permission.AuthCheckCountContext(req.Context(), uint64(max)), + )) + }) + } +} + +func timeout(timeout time.Duration) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx, cancel := context.WithTimeout(req.Context(), timeout) + defer cancel() + next.ServeHTTP(w, req.WithContext(ctx)) + }) + } +} diff --git a/app/middlewaregzip.go b/app/middlewaregzip.go new file mode 100644 index 0000000000..49d62d1094 --- /dev/null +++ b/app/middlewaregzip.go @@ -0,0 +1,67 @@ +package app + +import ( + "compress/gzip" + "io" + "net/http" + "strings" + "sync" + + "github.com/felixge/httpsnoop" +) + +var gzPool = sync.Pool{New: func() interface{} { return gzip.NewWriter(nil) }} + +// wrapGzip will wrap an http.Handler to respond with gzip encoding. 
+func wrapGzip(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if !strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") || req.Header.Get("Range") != "" { + // Normal pass-through if gzip isn't accepted, there's no content type, or a Range is requested. + // + // Not going to handle the whole Transfer-Encoding vs Content-Encoding stuff -- just disable + // gzip in this case. + next.ServeHTTP(w, req) + return + } + + // If gzip is asked for, and we're not already replying with gzip + // then wrap it. This is important as if we are proxying + // UI assets (for example) we don't want to re-compress an already + // compressed payload. + + var output io.Writer + var check sync.Once + cleanup := func() {} + getOutput := func() { + if w.Header().Get("Content-Encoding") != "" || w.Header().Get("Content-Type") == "" { + // already encoded + output = w + return + } + + gz := gzPool.Get().(*gzip.Writer) + gz.Reset(w) + w.Header().Set("Content-Encoding", "gzip") + w.Header().Set("Vary", "Accept-Encoding") + w.Header().Del("Content-Length") + cleanup = func() { + gz.Close() + gzPool.Put(gz) + } + output = gz + } + + ww := httpsnoop.Wrap(w, httpsnoop.Hooks{ + WriteHeader: func(next httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { check.Do(getOutput); return next }, + Write: func(next httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return func(b []byte) (int, error) { check.Do(getOutput); return output.Write(b) } + }, + ReadFrom: func(next httpsnoop.ReadFromFunc) httpsnoop.ReadFromFunc { + return func(src io.Reader) (int64, error) { check.Do(getOutput); return io.Copy(output, src) } + }, + }) + + defer func() { cleanup() }() + next.ServeHTTP(ww, req) + }) +} diff --git a/app/pause.go b/app/pause.go new file mode 100644 index 0000000000..b72f4e11bb --- /dev/null +++ b/app/pause.go @@ -0,0 +1,54 @@ +package app + +import ( + "context" + "github.com/target/goalert/switchover" + 
"github.com/target/goalert/util/log" + "net/http" + + "go.opencensus.io/trace" +) + +func (app *App) pauseHandler(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + err := app.requestLock.RLock(ctx) + if err != nil { + log.Log(ctx, err) + return + } + defer app.requestLock.RUnlock() + next.ServeHTTP(w, req) + }) +} + +func (app *App) Pause(ctx context.Context) error { + ctx, sp := trace.StartSpan(ctx, "App.Pause") + defer sp.End() + + err := app.mgr.Pause(ctx) + if err != nil { + return err + } + app.db.SetMaxIdleConns(0) + return nil +} +func (app *App) Resume() { + app.db.SetMaxIdleConns(app.cfg.DBMaxIdle) + app.mgr.Resume(context.Background()) +} +func (app *App) _pause(ctx context.Context) error { + cfg := switchover.ConfigFromContext(ctx) + if cfg.NoPauseAPI { + return nil + } + err := app.requestLock.Lock(ctx) + if err != nil { + return err + } + return nil +} +func (app *App) _resume(ctx context.Context) error { + app.requestLock.Unlock() + return nil +} diff --git a/app/recoverexporter.go b/app/recoverexporter.go new file mode 100644 index 0000000000..1cd337ebbb --- /dev/null +++ b/app/recoverexporter.go @@ -0,0 +1,39 @@ +package app + +import ( + "context" + "github.com/target/goalert/util/log" + + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +type recoverExporter struct { + exp trace.Exporter +} + +func (r recoverExporter) ExportSpan(s *trace.SpanData) { + defer func() { + err := recover() + if err != nil { + log.Log(context.Background(), errors.Errorf("export span (panic): %+v", err)) + } + }() + r.exp.ExportSpan(s) +} +func (r recoverExporter) Flush() { + type flusher interface { + Flush() + } + f, ok := r.exp.(flusher) + if !ok { + return + } + defer func() { + err := recover() + if err != nil { + log.Log(context.Background(), errors.Errorf("flush exporter (panic): %+v", err)) + } + }() + f.Flush() +} diff --git a/app/runapp.go b/app/runapp.go new 
file mode 100644 index 0000000000..6f3035d1e6 --- /dev/null +++ b/app/runapp.go @@ -0,0 +1,48 @@ +package app + +import ( + "context" + "net/http" + "os" + + "github.com/target/goalert/util/log" + + "github.com/pkg/errors" +) + +var triggerSignals []os.Signal + +// Run will start the application and start serving traffic. +func (app *App) Run(ctx context.Context) error { + return app.mgr.Run(ctx) +} + +func (app *App) _Run(ctx context.Context) error { + go func() { + err := app.engine.Run(ctx) + if err != nil { + log.Log(ctx, err) + } + }() + log.Logf( + log.WithFields(context.TODO(), log.Fields{ + "address": app.l.Addr().String(), + "url": app.ConfigStore.Config().PublicURL(), + }), + "Listening.", + ) + + eventCtx, cancel := context.WithCancel(ctx) + defer cancel() + err := app.listenEvents(eventCtx) + if err != nil { + return err + } + + err = app.srv.Serve(app.l) + if err != nil && err != http.ErrServerClosed { + return errors.Wrap(err, "serve HTTP") + } + + return nil +} diff --git a/app/shutdown.go b/app/shutdown.go new file mode 100644 index 0000000000..565985674b --- /dev/null +++ b/app/shutdown.go @@ -0,0 +1,73 @@ +package app + +import ( + "context" + "os" + "time" + + "github.com/pkg/errors" +) + +// Shutdown will cause the App to begin a graceful shutdown, using +// the provided context for any cleanup operations. 
+func (app *App) Shutdown(ctx context.Context) error { + return app.mgr.Shutdown(ctx) +} + +func (app *App) _Shutdown(ctx context.Context) error { + defer close(app.doneCh) + defer app.db.Close() + var errs []error + + if app.cooldown != nil { + // wait for the cooldown (since last req closed) + app.cooldown.WaitContext(ctx) + } + + if app.srv != nil { + errs = append(errs, errors.Wrap(app.srv.Shutdown(ctx), "shutdown HTTP server")) + } + errs = append(errs, errors.Wrap(app.l.Close(), "close listening socket")) + + if app.engine != nil { + errs = append(errs, errors.Wrap(app.engine.Shutdown(ctx), "shutdown engine")) + } + + if app.notificationManager != nil { + errs = append(errs, errors.Wrap(app.notificationManager.Shutdown(ctx), "shutdown notification manager")) + } + + if app.SessionKeyring != nil { + errs = append(errs, errors.Wrap(app.SessionKeyring.Shutdown(ctx), "shutdown session keyring")) + } + + if app.OAuthKeyring != nil { + errs = append(errs, errors.Wrap(app.OAuthKeyring.Shutdown(ctx), "shutdown oauth keyring")) + } + + if app.NonceStore != nil { + errs = append(errs, errors.Wrap(app.NonceStore.Shutdown(ctx), "shutdown nonce store")) + } + + // filter out nil values + shutdownErrs := errs[:0] + for _, e := range errs { + if e == nil { + continue + } + shutdownErrs = append(shutdownErrs, e) + } + + if len(shutdownErrs) == 1 { + return shutdownErrs[0] + } + if len(shutdownErrs) > 1 { + return errors.Errorf("multiple shutdown errors: %+v", shutdownErrs) + } + + return nil +} + +var shutdownSignals = []os.Signal{os.Interrupt} + +const shutdownTimeout = time.Minute * 2 diff --git a/app/shutdownsignals_unix.go b/app/shutdownsignals_unix.go new file mode 100644 index 0000000000..1445f0fbd1 --- /dev/null +++ b/app/shutdownsignals_unix.go @@ -0,0 +1,10 @@ +package app + +import ( + "syscall" +) + +func init() { + shutdownSignals = append(shutdownSignals, syscall.SIGTERM) + triggerSignals = append(triggerSignals, syscall.SIGUSR2) +} diff --git a/app/startup.go 
b/app/startup.go new file mode 100644 index 0000000000..03df3ab140 --- /dev/null +++ b/app/startup.go @@ -0,0 +1,94 @@ +package app + +import ( + "context" + "time" + + "github.com/target/goalert/app/lifecycle" + "github.com/target/goalert/migrate" + "github.com/target/goalert/notification" + "github.com/target/goalert/retry" + "github.com/target/goalert/util/log" + + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +func (app *App) initStartup(ctx context.Context, label string, fn func(context.Context) error) { + if app.startupErr != nil { + return + } + + ctx, sp := trace.StartSpan(ctx, label) + defer sp.End() + err := fn(ctx) + if err != nil { + sp.Annotate([]trace.Attribute{trace.BoolAttribute("error", true)}, err.Error()) + app.startupErr = errors.Wrap(err, label) + } +} + +func (app *App) startup(ctx context.Context) error { + ctx, sp := trace.StartSpan(ctx, "Startup") + defer sp.End() + + app.initStartup(ctx, "Startup.TestDBConn", func(ctx context.Context) error { + err := app.db.PingContext(ctx) + if err == nil { + return nil + } + + t := time.NewTicker(time.Second) + defer t.Stop() + for retry.IsTemporaryError(err) { + log.Log(ctx, err) + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C: + err = app.db.PingContext(ctx) + } + } + + return err + }) + + app.initStartup(ctx, "Startup.MigrateDB", func(ctx context.Context) error { + n, err := migrate.ApplyAll(log.EnableDebug(ctx), app.db) + if err != nil { + return errors.Wrap(err, "apply migrations") + } + if n > 0 { + log.Logf(ctx, "Applied %d migrations.", n) + } + return nil + }) + + app.notificationManager = notification.NewManager() + if app.cfg.StubNotifiers { + app.notificationManager.SetStubNotifiers() + } + + app.initStartup(ctx, "Startup.DBStores", app.initStores) + + // init twilio before engine + app.initStartup( + ctx, "Startup.Twilio", app.initTwilio) + + app.initStartup(ctx, "Startup.Slack", app.initSlack) + + app.initStartup(ctx, "Startup.Engine", 
app.initEngine) + app.initStartup(ctx, "Startup.GraphQL", app.initGraphQL) + app.initStartup(ctx, "Startup.Auth", app.initAuth) + + app.initStartup(ctx, "Startup.HTTPServer", app.initHTTP) + + if app.startupErr != nil { + return app.startupErr + } + + return app.mgr.SetPauseResumer(lifecycle.MultiPauseResume( + app.engine, + lifecycle.PauseResumerFunc(app._pause, app._resume), + )) +} diff --git a/app/tracing.go b/app/tracing.go new file mode 100644 index 0000000000..7fe106c701 --- /dev/null +++ b/app/tracing.go @@ -0,0 +1,78 @@ +package app + +import ( + "context" + "github.com/target/goalert/util/log" + + "cloud.google.com/go/compute/metadata" + "contrib.go.opencensus.io/exporter/stackdriver" + "github.com/pkg/errors" + "go.opencensus.io/exporter/jaeger" + "go.opencensus.io/trace" + "google.golang.org/genproto/googleapis/api/monitoredres" +) + +func configTracing(ctx context.Context, c appConfig) ([]trace.Exporter, error) { + var exporters []trace.Exporter + if c.JaegerEndpoint != "" || c.JaegerAgentEndpoint != "" { + exporter, err := jaeger.NewExporter(jaeger.Options{ + Endpoint: c.JaegerEndpoint, + AgentEndpoint: c.JaegerAgentEndpoint, + ServiceName: "goalert", + }) + if err != nil { + return nil, errors.Wrap(err, "init jaeger exporter") + } + e := c.wrapExporter(exporter) + exporters = append(exporters, e) + trace.RegisterExporter(recoverExporter{exp: e}) + } + + if c.StackdriverProjectID != "" { + opts := stackdriver.Options{ + ProjectID: c.StackdriverProjectID, + } + if c.TracingClusterName != "" { + instanceID, err := metadata.InstanceID() + if err != nil { + log.Log(ctx, errors.Wrap(err, "get instance ID")) + instanceID = "unknown" + } + zone, err := metadata.Zone() + if err != nil { + log.Log(ctx, errors.Wrap(err, "get zone")) + zone = "unknown" + } + opts.Resource = &monitoredres.MonitoredResource{ + Type: "gke_container", + Labels: map[string]string{ + "project_id": c.StackdriverProjectID, + "cluster_name": c.TracingClusterName, + "instance_id": 
instanceID, + "zone": zone, + + // See: https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/ + "namespace_id": c.TracingPodNamespace, + "pod_id": c.TracingPodName, + "container_name": c.TracingContainerName, + }, + } + } + exporter, err := stackdriver.NewExporter(opts) + if err != nil { + return nil, errors.Wrap(err, "init stackdriver exporter") + } + exporters = append(exporters, exporter) + trace.RegisterExporter(recoverExporter{exp: exporter}) + } + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(c.TraceProbability)}) + + if c.LogTraces { + e := c.wrapExporter(&logExporter{}) + exporters = append(exporters, e) + trace.RegisterExporter(recoverExporter{exp: e}) + } + + return exporters, nil +} diff --git a/app/trigger.go b/app/trigger.go new file mode 100644 index 0000000000..d15111ad3a --- /dev/null +++ b/app/trigger.go @@ -0,0 +1,12 @@ +package app + +import "context" + +// Trigger will start a processing cycle (normally ever ~5s) +func (app *App) Trigger() { + app.mgr.WaitForStartup(context.Background()) + + if app.engine != nil { + app.engine.Trigger() + } +} diff --git a/app/version.go b/app/version.go new file mode 100644 index 0000000000..426a5c2d38 --- /dev/null +++ b/app/version.go @@ -0,0 +1,9 @@ +package app + +var ( + gitVersion = "dev" + gitTreeState = "unknown" + gitCommit = "?" + + buildDate = "1970-01-01T00:00:00Z" +) diff --git a/assignment/assignment.go b/assignment/assignment.go new file mode 100644 index 0000000000..a4f68ba472 --- /dev/null +++ b/assignment/assignment.go @@ -0,0 +1,6 @@ +package assignment + +type Assignment interface { + Source + Target +} diff --git a/assignment/doc.go b/assignment/doc.go new file mode 100644 index 0000000000..4635bf27e8 --- /dev/null +++ b/assignment/doc.go @@ -0,0 +1,7 @@ +/* package assignment handles updating and resolving entity assignments + + +Everything from Alerts down must resolve to one or more contact methods in the end. 
+*/ + +package assignment diff --git a/assignment/source.go b/assignment/source.go new file mode 100644 index 0000000000..1d7487d03f --- /dev/null +++ b/assignment/source.go @@ -0,0 +1,74 @@ +package assignment + +import "strconv" + +// Source contains information about the source, or subject of an assignment. +type Source interface { + SourceType() SrcType + SourceID() string +} +type RawSource struct { + Type SrcType + ID string +} + +func NewRawSource(s Source) RawSource { + return RawSource{Type: s.SourceType(), ID: s.SourceID()} +} +func (rt RawSource) SourceType() SrcType { + return rt.Type +} +func (rt RawSource) SourceID() string { + return rt.ID +} + +type ( + // AlertSource implements the Source interface by wrapping an Alert ID. + AlertSource int + // EscalationPolicyStepSource implements the Source interface by wrapping an EsclationPolicyStep ID. + EscalationPolicyStepSource string + // RotationParticipantSource implements the Source interface by wrapping a RotationParticipant ID. + RotationParticipantSource string + // ScheduleRuleSource implements the Source interface by wrapping a ScheduleRule ID. + ScheduleRuleSource string + // ServiceSource implements the Source interface by wrapping a Service ID. + ServiceSource string + // UserSource implements the Source interface by wrapping a UserSource ID. + UserSource string +) + +// SourceType implements the Source interface. +func (AlertSource) SourceType() SrcType { return SrcTypeAlert } + +// SourceID implements the Source interface. +func (a AlertSource) SourceID() string { return strconv.Itoa(int(a)) } + +// SourceType implements the Source interface. +func (EscalationPolicyStepSource) SourceType() SrcType { return SrcTypeEscalationPolicyStep } + +// SourceID implements the Source interface. +func (e EscalationPolicyStepSource) SourceID() string { return string(e) } + +// SourceType implements the Source interface. 
+func (RotationParticipantSource) SourceType() SrcType { return SrcTypeRotationParticipant } + +// SourceID implements the Source interface. +func (r RotationParticipantSource) SourceID() string { return string(r) } + +// SourceType implements the Source interface. +func (ScheduleRuleSource) SourceType() SrcType { return SrcTypeScheduleRule } + +// SourceID implements the Source interface. +func (s ScheduleRuleSource) SourceID() string { return string(s) } + +// SourceType implements the Source interface. +func (ServiceSource) SourceType() SrcType { return SrcTypeService } + +// SourceID implements the Source interface. +func (s ServiceSource) SourceID() string { return string(s) } + +// SourceType implements the Source interface. +func (UserSource) SourceType() SrcType { return SrcTypeUser } + +// SourceID implements the Source interface. +func (u UserSource) SourceID() string { return string(u) } diff --git a/assignment/srctype.go b/assignment/srctype.go new file mode 100644 index 0000000000..f1ee3b9a27 --- /dev/null +++ b/assignment/srctype.go @@ -0,0 +1,32 @@ +package assignment + +// SrcType represents the source-type of an assignment. 
+type SrcType int + +// Available SrcTypes +const ( + SrcTypeUnspecified SrcType = iota + SrcTypeAlert + SrcTypeEscalationPolicyStep + SrcTypeRotationParticipant + SrcTypeScheduleRule + SrcTypeService + SrcTypeUser +) + +func (s SrcType) ParentType() TargetType { + switch s { + case SrcTypeEscalationPolicyStep: + return TargetTypeEscalationPolicy + case SrcTypeRotationParticipant: + return TargetTypeRotation + case SrcTypeScheduleRule: + return TargetTypeSchedule + case SrcTypeService: + return TargetTypeService + case SrcTypeUser: + return TargetTypeUser + } + + return TargetTypeUnspecified +} diff --git a/assignment/srctype_string.go b/assignment/srctype_string.go new file mode 100644 index 0000000000..ea414d9aa8 --- /dev/null +++ b/assignment/srctype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type SrcType"; DO NOT EDIT. + +package assignment + +import "fmt" + +const _SrcType_name = "SrcTypeUnspecifiedSrcTypeAlertSrcTypeEscalationPolicyStepSrcTypeRotationParticipantSrcTypeScheduleRuleSrcTypeServiceSrcTypeUser" + +var _SrcType_index = [...]uint8{0, 18, 30, 57, 83, 102, 116, 127} + +func (i SrcType) String() string { + if i < 0 || i >= SrcType(len(_SrcType_index)-1) { + return fmt.Sprintf("SrcType(%d)", i) + } + return _SrcType_name[_SrcType_index[i]:_SrcType_index[i+1]] +} diff --git a/assignment/target.go b/assignment/target.go new file mode 100644 index 0000000000..aa7bbbcc2e --- /dev/null +++ b/assignment/target.go @@ -0,0 +1,124 @@ +package assignment + +// Target contains information about the target, or assignee of an assignment. 
+type Target interface { + TargetType() TargetType + TargetID() string +} +type RawTarget struct { + Type TargetType `json:"target_type"` + ID string `json:"target_id"` + Name string `json:"target_name"` +} + +func NewRawTarget(t Target) RawTarget { + return RawTarget{Type: t.TargetType(), ID: t.TargetID()} +} +func (rt RawTarget) TargetType() TargetType { + return rt.Type +} +func (rt RawTarget) TargetID() string { + return rt.ID +} + +// TargetName returns the name of the target. If unavailable, an empty string is returned. +func (rt RawTarget) TargetName() string { + return rt.Name +} + +// TargetNamer allows getting the friendly name of a target. +// Note: TargetName may return an empty string if the name is unavailable. +type TargetNamer interface { + TargetName() string +} + +type ( + // EscalationPolicyTarget implements the Target interface by wrapping an EscalationPolicy ID. + EscalationPolicyTarget string + // NotificationPolicyTarget implements the Target interface by wrapping a NotificationPolicy ID. + NotificationPolicyTarget string + // RotationTarget implements the Target interface by wrapping a Rotation ID. + RotationTarget string + // ServiceTarget implements the Target interface by wrapping a Service ID. + ServiceTarget string + // ScheduleTarget implements the Target interface by wrapping a Schedule ID. + ScheduleTarget string + // UserTarget implements the Target interface by wrapping a User ID. + UserTarget string + // NotificationChannelTarget implements the Target interface by wrapping a notification channel ID. + NotificationChannelTarget string + // IntegrationKeyTarget implements the Target interface by wrapping an IntegrationKey ID. + IntegrationKeyTarget string + // UserOverrideTarget implements the Target interface by wrapping an UserOverride ID. + UserOverrideTarget string + // ContactMethodTarget implements the Target interface by wrapping a ContactMethod ID. 
+ ContactMethodTarget string + // NotificationRuleTarget implements the Target interface by wrapping an NotificationRule ID. + NotificationRuleTarget string +) + +// TargetType implements the Target interface. +func (EscalationPolicyTarget) TargetType() TargetType { return TargetTypeEscalationPolicy } + +// TargetID implements the Target interface. +func (e EscalationPolicyTarget) TargetID() string { return string(e) } + +// TargetType implements the Target interface. +func (NotificationPolicyTarget) TargetType() TargetType { return TargetTypeNotificationPolicy } + +// TargetID implements the Target interface. +func (n NotificationPolicyTarget) TargetID() string { return string(n) } + +// TargetType implements the Target interface. +func (RotationTarget) TargetType() TargetType { return TargetTypeRotation } + +// TargetID implements the Target interface. +func (r RotationTarget) TargetID() string { return string(r) } + +// TargetType implements the Target interface. +func (ServiceTarget) TargetType() TargetType { return TargetTypeService } + +// TargetID implements the Target interface. +func (s ServiceTarget) TargetID() string { return string(s) } + +// TargetType implements the Target interface. +func (ScheduleTarget) TargetType() TargetType { return TargetTypeSchedule } + +// TargetID implements the Target interface. +func (s ScheduleTarget) TargetID() string { return string(s) } + +// TargetType implements the Target interface. +func (UserTarget) TargetType() TargetType { return TargetTypeUser } + +// TargetID implements the Target interface. +func (u UserTarget) TargetID() string { return string(u) } + +// TargetType implements the Target interface. +func (NotificationChannelTarget) TargetType() TargetType { return TargetTypeNotificationChannel } + +// TargetID implements the Target interface. +func (nc NotificationChannelTarget) TargetID() string { return string(nc) } + +// TargetType implements the Target interface. 
+func (IntegrationKeyTarget) TargetType() TargetType { return TargetTypeIntegrationKey } + +// TargetID implements the Target interface. +func (k IntegrationKeyTarget) TargetID() string { return string(k) } + +// TargetType implements the Target interface. +func (UserOverrideTarget) TargetType() TargetType { return TargetTypeUserOverride } + +// TargetID implements the Target interface. +func (k UserOverrideTarget) TargetID() string { return string(k) } + +// TargetType implements the Target interface. +func (ContactMethodTarget) TargetType() TargetType { return TargetTypeContactMethod } + +// TargetID implements the Target interface. +func (k ContactMethodTarget) TargetID() string { return string(k) } + +// TargetType implements the Target interface. +func (NotificationRuleTarget) TargetType() TargetType { return TargetTypeNotificationRule } + +// TargetID implements the Target interface. +func (k NotificationRuleTarget) TargetID() string { return string(k) } diff --git a/assignment/targettype.go b/assignment/targettype.go new file mode 100644 index 0000000000..0e65070421 --- /dev/null +++ b/assignment/targettype.go @@ -0,0 +1,99 @@ +package assignment + +//go:generate go run golang.org/x/tools/cmd/stringer -type TargetType + +import ( + "github.com/target/goalert/validation" + "io" + + "github.com/99designs/gqlgen/graphql" +) + +// TargetType represents the destination type of an assignment +type TargetType int + +// Assignment destination types +const ( + TargetTypeUnspecified TargetType = iota + TargetTypeEscalationPolicy + TargetTypeNotificationPolicy + TargetTypeRotation + TargetTypeService + TargetTypeSchedule + TargetTypeUser + TargetTypeNotificationChannel + TargetTypeSlackChannel + TargetTypeIntegrationKey + TargetTypeUserOverride + TargetTypeNotificationRule + TargetTypeContactMethod +) + +// UnmarshalGQL implements the graphql.Marshaler interface +func (tt *TargetType) UnmarshalGQL(v interface{}) error { + str, err := graphql.UnmarshalString(v) + 
if err != nil { + return err + } + + switch str { + case "escalationPolicy": + *tt = TargetTypeEscalationPolicy + case "notificationPolicy": + *tt = TargetTypeNotificationPolicy + case "rotation": + *tt = TargetTypeRotation + case "service": + *tt = TargetTypeService + case "schedule": + *tt = TargetTypeSchedule + case "user": + *tt = TargetTypeUser + case "integrationKey": + *tt = TargetTypeIntegrationKey + case "notificationChannel": + *tt = TargetTypeNotificationChannel + case "slackChannel": + *tt = TargetTypeSlackChannel + case "userOverride": + *tt = TargetTypeUserOverride + case "contactMethod": + *tt = TargetTypeContactMethod + case "notificationRule": + *tt = TargetTypeNotificationRule + default: + return validation.NewFieldError("TargetType", "unknown target type "+str) + } + + return nil +} + +// MarshalGQL implements the graphql.Marshaler interface +func (tt TargetType) MarshalGQL(w io.Writer) { + switch tt { + case TargetTypeEscalationPolicy: + graphql.MarshalString("escalationPolicy").MarshalGQL(w) + case TargetTypeNotificationPolicy: + graphql.MarshalString("notificationPolicy").MarshalGQL(w) + case TargetTypeRotation: + graphql.MarshalString("rotation").MarshalGQL(w) + case TargetTypeService: + graphql.MarshalString("service").MarshalGQL(w) + case TargetTypeSchedule: + graphql.MarshalString("schedule").MarshalGQL(w) + case TargetTypeUser: + graphql.MarshalString("user").MarshalGQL(w) + case TargetTypeIntegrationKey: + graphql.MarshalString("integrationKey").MarshalGQL(w) + case TargetTypeUserOverride: + graphql.MarshalString("userOverride").MarshalGQL(w) + case TargetTypeNotificationChannel: + graphql.MarshalString("notificationChannel").MarshalGQL(w) + case TargetTypeSlackChannel: + graphql.MarshalString("slackChannel").MarshalGQL(w) + case TargetTypeContactMethod: + graphql.MarshalString("contactMethod").MarshalGQL(w) + case TargetTypeNotificationRule: + graphql.MarshalString("notificationRule").MarshalGQL(w) + } +} diff --git 
a/assignment/targettype_string.go b/assignment/targettype_string.go new file mode 100644 index 0000000000..5c76fb9b7f --- /dev/null +++ b/assignment/targettype_string.go @@ -0,0 +1,35 @@ +// Code generated by "stringer -type TargetType"; DO NOT EDIT. + +package assignment + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TargetTypeUnspecified-0] + _ = x[TargetTypeEscalationPolicy-1] + _ = x[TargetTypeNotificationPolicy-2] + _ = x[TargetTypeRotation-3] + _ = x[TargetTypeService-4] + _ = x[TargetTypeSchedule-5] + _ = x[TargetTypeUser-6] + _ = x[TargetTypeNotificationChannel-7] + _ = x[TargetTypeSlackChannel-8] + _ = x[TargetTypeIntegrationKey-9] + _ = x[TargetTypeUserOverride-10] + _ = x[TargetTypeNotificationRule-11] + _ = x[TargetTypeContactMethod-12] +} + +const _TargetType_name = "TargetTypeUnspecifiedTargetTypeEscalationPolicyTargetTypeNotificationPolicyTargetTypeRotationTargetTypeServiceTargetTypeScheduleTargetTypeUserTargetTypeNotificationChannelTargetTypeSlackChannelTargetTypeIntegrationKeyTargetTypeUserOverrideTargetTypeNotificationRuleTargetTypeContactMethod" + +var _TargetType_index = [...]uint16{0, 21, 47, 75, 93, 110, 128, 142, 171, 193, 217, 239, 265, 288} + +func (i TargetType) String() string { + if i < 0 || i >= TargetType(len(_TargetType_index)-1) { + return "TargetType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _TargetType_name[_TargetType_index[i]:_TargetType_index[i+1]] +} diff --git a/auth/basic/config.go b/auth/basic/config.go new file mode 100644 index 0000000000..f3cf919f3e --- /dev/null +++ b/auth/basic/config.go @@ -0,0 +1,5 @@ +package basic + +// Config configures the basic auth provider +type Config struct { +} diff --git a/auth/basic/db.go b/auth/basic/db.go new file mode 100644 index 0000000000..9546f7775a --- /dev/null +++ b/auth/basic/db.go @@ -0,0 +1,89 @@ 
+package basic + +import ( + "context" + "database/sql" + "fmt" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + + "github.com/pkg/errors" + "golang.org/x/crypto/bcrypt" +) + +// Store can create new user/pass links and validate a username and password. bcrypt is used +// for password storage & verification. +type Store struct { + insert *sql.Stmt + getByUsername *sql.Stmt +} + +const tableName = "auth_basic_users" +const passCost = 14 + +// NewStore creates a new DB. Error is returned if the prepared statements fail to register. +func NewStore(ctx context.Context, db *sql.DB) (*Store, error) { + p := &util.Prepare{ + DB: db, + Ctx: ctx, + } + return &Store{ + insert: p.P(fmt.Sprintf("INSERT INTO %s(user_id, username, password_hash) VALUES ($1, $2, $3)", tableName)), + getByUsername: p.P(fmt.Sprintf("SELECT user_id, password_hash FROM %s WHERE username = $1", tableName)), + }, p.Err +} + +// CreateTx should add a new entry for the username/password combination linking to userID. +// An error is returned if the username is not unique or the userID is invalid. +// Must have same user or admin role. +func (b *Store) CreateTx(ctx context.Context, tx *sql.Tx, userID, username, password string) error { + err := permission.LimitCheckAny(ctx, permission.System, permission.Admin, permission.MatchUser(userID)) + if err != nil { + return err + } + + err = validate.Many( + validate.UUID("UserID", userID), + validate.UserName("UserName", username), + validate.Text("Password", password, 8, 200), + ) + if err != nil { + return err + } + + hashedPassword, err := bcrypt.GenerateFromPassword([]byte(password), passCost) + if err != nil { + return err + } + _, err = tx.StmtContext(ctx, b.insert).ExecContext(ctx, userID, username, string(hashedPassword)) + return err +} + +// Validate should return a userID if the username and password match. 
+func (b *Store) Validate(ctx context.Context, username, password string) (string, error) { + err := validate.Many( + validate.UserName("UserName", username), + validate.Text("Password", password, 1, 200), + ) + if err != nil { + return "", err + } + + row := b.getByUsername.QueryRowContext(ctx, username) + var userID, hashed string + err = row.Scan(&userID, &hashed) + if err != nil { + if err == sql.ErrNoRows { + return "", errors.New("invalid username") + } + return "", errors.WithMessage(err, "user lookup failure") + } + + err = bcrypt.CompareHashAndPassword([]byte(hashed), []byte(password)) + if err != nil { + return "", errors.WithMessage(err, "invalid password") + } + + return userID, nil +} diff --git a/auth/basic/doc.go b/auth/basic/doc.go new file mode 100644 index 0000000000..90f19c0b8f --- /dev/null +++ b/auth/basic/doc.go @@ -0,0 +1,2 @@ +// Package basic implements a simple auth provider and backend that identifies a user via username & password combination. +package basic diff --git a/auth/basic/identityprovider.go b/auth/basic/identityprovider.go new file mode 100644 index 0000000000..8377577624 --- /dev/null +++ b/auth/basic/identityprovider.go @@ -0,0 +1,58 @@ +package basic + +import ( + "context" + "net/http" + + "github.com/pkg/errors" + "github.com/target/goalert/auth" + "github.com/target/goalert/config" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation/validate" +) + +// Info implements the auth.Provider interface. 
+func (Provider) Info(ctx context.Context) auth.ProviderInfo { + cfg := config.FromContext(ctx) + return auth.ProviderInfo{ + Title: "Basic", + Fields: []auth.Field{ + {ID: "username", Label: "Username", Required: true}, + {ID: "password", Label: "Password", Password: true, Required: true}, + }, + Enabled: !cfg.Auth.DisableBasic, + } +} + +func userPass(req *http.Request) (string, string) { + if req.URL.User == nil { + return req.FormValue("username"), req.FormValue("password") + } + + p, _ := req.URL.User.Password() + return req.URL.User.Username(), p +} + +// ExtractIdentity implements the auth.IdentityProvider interface, providing identity based +// on the given username and password fields. +func (p *Provider) ExtractIdentity(route *auth.RouteInfo, w http.ResponseWriter, req *http.Request) (*auth.Identity, error) { + ctx := req.Context() + + username, password := userPass(req) + err := validate.UserName("Username", username) + if err != nil { + return nil, auth.Error("invalid username") + } + ctx = log.WithField(ctx, "username", username) + + _, err = p.b.Validate(ctx, username, password) + if err != nil { + log.Debug(ctx, errors.Wrap(err, "basic login")) + auth.Delay(ctx) + return nil, auth.Error("unknown username/password") + } + + return &auth.Identity{ + SubjectID: username, + }, nil +} diff --git a/auth/basic/provider.go b/auth/basic/provider.go new file mode 100644 index 0000000000..615f5f59b2 --- /dev/null +++ b/auth/basic/provider.go @@ -0,0 +1,21 @@ +package basic + +import ( + "context" + "database/sql" +) + +// Provider implements the auth.IdentityProvider interface. +type Provider struct { + b *Store +} + +// NewProvider creates a new Provider with the associated config. 
+func NewProvider(ctx context.Context, db *sql.DB) (*Provider, error) { + b, err := NewStore(ctx, db) + if err != nil { + return nil, err + } + + return &Provider{b: b}, nil +} diff --git a/auth/cookies.go b/auth/cookies.go new file mode 100644 index 0000000000..b5f11a4f03 --- /dev/null +++ b/auth/cookies.go @@ -0,0 +1,38 @@ +package auth + +import ( + "net/http" + "time" + + "github.com/target/goalert/config" +) + +// SetCookie will set a cookie value for all API prefixes, respecting the current config parameters. +func SetCookie(w http.ResponseWriter, req *http.Request, name, value string) { + SetCookieAge(w, req, name, value, 0) +} + +// SetCookieAge behaves like SetCookie but also sets the MaxAge. +func SetCookieAge(w http.ResponseWriter, req *http.Request, name, value string, age time.Duration) { + cfg := config.FromContext(req.Context()) + http.SetCookie(w, &http.Cookie{ + HttpOnly: true, + Secure: cfg.AuthSecure(), + Name: name, + + // Until we can finish removing /v1 from all UI calls + // we need cookies available on both /api and /v1. + // + // Unfortunately we can't just set both paths without breaking integration tests... + // We'll keep this as `/` until Cypress fixes its cookie handling, or we + // finish removing the `/v1` UI code. Whichever is sooner. + Path: "/", + Value: value, + MaxAge: int(age.Seconds()), + }) +} + +// ClearCookie will clear and expire the cookie with the given name, for all API prefixes. 
+func ClearCookie(w http.ResponseWriter, req *http.Request, name string) { + SetCookieAge(w, req, name, "", -time.Second) +} diff --git a/auth/faildelay.go b/auth/faildelay.go new file mode 100644 index 0000000000..2d8da5beaa --- /dev/null +++ b/auth/faildelay.go @@ -0,0 +1,37 @@ +package auth + +import ( + "context" + cRand "crypto/rand" + "encoding/binary" + "math/rand" + "time" +) + +var failRand *rand.Rand + +const ( + delayMax = 10 * time.Millisecond +) + +func init() { + var seed int64 + err := binary.Read(cRand.Reader, binary.BigEndian, &seed) + if err != nil { + panic(err) + } + failRand = rand.New(rand.NewSource(seed)) +} + +// Delay will block for a random delay (or until the context is Done). +// +// It is useful in situations where there has been an auth failure. +func Delay(ctx context.Context) { + dur := time.Duration(failRand.Int63n(int64(delayMax))) + t := time.NewTicker(dur) + defer t.Stop() + select { + case <-ctx.Done(): + case <-t.C: + } +} diff --git a/auth/gettoken.go b/auth/gettoken.go new file mode 100644 index 0000000000..68d5ce7b2f --- /dev/null +++ b/auth/gettoken.go @@ -0,0 +1,49 @@ +package auth + +import ( + "net/http" + "strings" +) + +// GetToken will return the auth token associated with a request. 
+// +// Supported options (in priority order): +// - `token` (field or query) +// - Authorization: Bearer header +func GetToken(req *http.Request) string { + tok := req.FormValue("token") + if tok != "" { + return tok + } + + // compat + tok = req.FormValue("integrationKey") + if tok != "" { + return tok + } + + // compat + tok = req.FormValue("integration_key") + if tok != "" { + return tok + } + + // compat + tok = req.FormValue("key") + if tok != "" { + return tok + } + + tok = strings.TrimPrefix(req.Header.Get("Authorization"), "Bearer ") + if tok != "" { + return tok + } + + // compat + _, tok, _ = req.BasicAuth() + if tok != "" { + return tok + } + + return "" +} diff --git a/auth/github/config.go b/auth/github/config.go new file mode 100644 index 0000000000..fb1f9667a9 --- /dev/null +++ b/auth/github/config.go @@ -0,0 +1,13 @@ +package github + +import ( + "github.com/target/goalert/auth/nonce" + "github.com/target/goalert/keyring" +) + +// Config is used to configure the GitHub OAuth2 provider. If none of Organization, Teams, or Users are +// specified as criteria, any valid user will be accepted. +type Config struct { + Keyring keyring.Keyring + NonceStore nonce.Store +} diff --git a/auth/github/doc.go b/auth/github/doc.go new file mode 100644 index 0000000000..1b3b93ac33 --- /dev/null +++ b/auth/github/doc.go @@ -0,0 +1,2 @@ +// Package github implements an auth provider and backend that identifies a user via github account. 
+package github diff --git a/auth/github/identityprovider.go b/auth/github/identityprovider.go new file mode 100644 index 0000000000..0416446577 --- /dev/null +++ b/auth/github/identityprovider.go @@ -0,0 +1,234 @@ +package github + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/google/go-github/github" + "github.com/pkg/errors" + "github.com/target/goalert/auth" + "github.com/target/goalert/config" + "github.com/target/goalert/util/log" + "golang.org/x/oauth2" +) + +const stateCookieName = "goalert_github_auth_state" + +// Info implements the auth.Provider interface. +func (Provider) Info(ctx context.Context) auth.ProviderInfo { + cfg := config.FromContext(ctx) + return auth.ProviderInfo{ + Title: "GitHub", + Enabled: cfg.GitHub.Enable, + } +} + +func (p *Provider) newStateToken() (string, error) { + buf := bytes.NewBuffer(nil) + buf.WriteByte('N') + + tok := p.c.NonceStore.New() + buf.Write(tok[:]) + + binary.Write(buf, binary.BigEndian, time.Now().Unix()) + + sig, err := p.c.Keyring.Sign(buf.Bytes()) + if err != nil { + return "", err + } + buf.Write(sig) + + return base64.URLEncoding.EncodeToString(buf.Bytes()), nil +} + +func (p *Provider) validateStateToken(ctx context.Context, s string) (bool, error) { + data, err := base64.URLEncoding.DecodeString(s) + if err != nil { + return false, err + } + if len(data) < 25 { + return false, nil + } + valid, _ := p.c.Keyring.Verify(data[:25], data[25:]) + if !valid { + return false, nil + } + if data[0] != 'N' { + return false, nil + } + var id [16]byte + copy(id[:], data[1:]) + + unix := int64(binary.BigEndian.Uint64(data[17:])) + t := time.Unix(unix, 0) + if time.Since(t) > time.Hour { + return false, nil + } + if time.Until(t) > time.Minute*5 { + // too far in the future (clock drift) + return false, nil + } + + return p.c.NonceStore.Consume(ctx, id) +} + +// ExtractIdentity implements the 
auth.IdentityProvider interface handling both auth and callback endpoints. +func (p *Provider) ExtractIdentity(route *auth.RouteInfo, w http.ResponseWriter, req *http.Request) (*auth.Identity, error) { + ctx := req.Context() + cfg := config.FromContext(ctx) + + switch route.RelativePath { + case "/": + tok, err := p.newStateToken() + if err != nil { + log.Log(req.Context(), errors.Wrap(err, "generate new state token")) + return nil, auth.Error("Failed to generate state token.") + } + + auth.SetCookie(w, req, stateCookieName, tok) + u := authConfig(ctx).AuthCodeURL(tok, oauth2.ApprovalForce) + + return nil, auth.RedirectURL(u) + case "/callback": + // handled below + default: + return nil, auth.Error("Invalid callback URL specified in GitHub application config.") + } + + tokStr := req.FormValue("state") + stateCookie, err := req.Cookie("goalert_github_auth_state") + if err != nil || stateCookie.Value != tokStr { + return nil, auth.Error("Invalid state token.") + } + auth.ClearCookie(w, req, stateCookieName) + + valid, err := p.validateStateToken(req.Context(), tokStr) + if err != nil { + log.Log(req.Context(), errors.Wrap(err, "validate state token")) + return nil, auth.Error("Could not validate state token.") + } + if !valid { + return nil, auth.Error("Invalid state token.") + } + + errorDesc := req.FormValue("error_description") + if errorDesc != "" { + return nil, auth.Error(errorDesc) + } + + oaCfg := authConfig(ctx) + + tok, err := oaCfg.Exchange(ctx, req.FormValue("code")) + if err != nil { + return nil, auth.Error("Failed to get token from GitHub.") + } + + if !tok.Valid() { + return nil, auth.Error("Invalid token returned from GitHub.") + } + + c := oaCfg.Client(ctx, tok) + g := github.NewClient(c) + if cfg.GitHub.EnterpriseURL != "" { + g.BaseURL, err = url.Parse(strings.TrimSuffix(cfg.GitHub.EnterpriseURL, "/") + "/api/v3/") + if err != nil { + return nil, err + } + } + + u, _, err := g.Users.Get(ctx, "") + if err != nil { + log.Debug(ctx, errors.Wrap(err, 
"fetch GitHub user")) + return nil, auth.Error("Failed to fetch user profile from GitHub.") + } + + var inUsers bool + login := strings.ToLower(u.GetLogin()) + ctx = log.WithFields(ctx, log.Fields{ + "github_id": u.GetID(), + "github_login": u.GetLogin(), + "github_name": u.GetName(), + }) + + for _, u := range cfg.GitHub.AllowedUsers { + if u == "*" || login == strings.ToLower(u) { + inUsers = true + break + } + } + + var inOrg bool + if !inUsers && len(cfg.GitHub.AllowedOrgs) > 0 { + for _, o := range cfg.GitHub.AllowedOrgs { + if strings.Contains(o, "/") { + //skip teams (process below) + continue + } + m, _, err := g.Organizations.IsMember(ctx, o, login) + if err != nil { + log.Log(ctx, errors.Wrap(err, "fetch GitHub org membership")) + return nil, auth.Error("Failed to read GitHub org membership") + } + if m { + inOrg = true + ctx = log.WithField(ctx, "github_org", o) + log.Debugf(ctx, "GitHub Auth matched org") + break + } + } + + if !inOrg { + + opt := &github.ListOptions{} + teams := make([]string, 0, 30) + + CheckTeams: + for { + tm, resp, err := g.Organizations.ListUserTeams(ctx, opt) + if err != nil { + log.Log(ctx, errors.Wrap(err, "fetch GitHub teams")) + return nil, auth.Error("Failed to read GitHub team membership") + } + for _, t := range tm { + teamName := strings.ToLower(t.Organization.GetLogin()) + "/" + strings.ToLower(t.GetSlug()) + teams = append(teams, teamName) + if containsOrg(cfg.GitHub.AllowedOrgs, teamName) { + inOrg = true + ctx = log.WithField(ctx, "github_team", teamName) + log.Debugf(ctx, "GitHub Auth matched team") + break CheckTeams + } + } + if resp.NextPage == 0 { + break + } + opt.Page = resp.NextPage + } + + // if still no match, log everything + if !inOrg { + log.Debugf(log.WithFields(ctx, log.Fields{ + "AllowedOrgs": cfg.GitHub.AllowedOrgs, + "TeamMemberships": teams, + }), "not in any matching team or org") + } + } + } + + if !inUsers && !inOrg { + return nil, auth.Error("Not a member of an allowed org or whitelisted user.") 
+ } + + return &auth.Identity{ + Email: u.GetEmail(), + Name: u.GetName(), + SubjectID: strconv.FormatInt(u.GetID(), 10), + }, nil +} diff --git a/auth/github/provider.go b/auth/github/provider.go new file mode 100644 index 0000000000..856e9f4091 --- /dev/null +++ b/auth/github/provider.go @@ -0,0 +1,57 @@ +package github + +import ( + "context" + "strings" + + "github.com/target/goalert/config" + "golang.org/x/oauth2" + o2Github "golang.org/x/oauth2/github" +) + +// Provider will respond to /auth and /callback endpoints for the purposes of GitHub OAuth2 authentication. +type Provider struct { + c Config +} + +func authConfig(ctx context.Context) *oauth2.Config { + cfg := config.FromContext(ctx) + + authURL := o2Github.Endpoint.AuthURL + tokenURL := o2Github.Endpoint.TokenURL + if cfg.GitHub.EnterpriseURL != "" { + authURL = strings.TrimSuffix(cfg.GitHub.EnterpriseURL, "/") + "/login/oauth/authorize" + tokenURL = strings.TrimSuffix(cfg.GitHub.EnterpriseURL, "/") + "/login/oauth/access_token" + } + scopes := []string{"read:user"} + if len(cfg.GitHub.AllowedOrgs) > 0 { + scopes = append(scopes, "read:org") + } + return &oauth2.Config{ + ClientID: cfg.GitHub.ClientID, + ClientSecret: cfg.GitHub.ClientSecret, + Scopes: scopes, + Endpoint: oauth2.Endpoint{ + AuthURL: authURL, + TokenURL: tokenURL, + }, + } +} + +// NewProvider will validate Config and create a new Provider. If Enabled is false, validation +// will be skipped. 
+func NewProvider(ctx context.Context, c *Config) (*Provider, error) { + return &Provider{ + c: *c, + }, nil +} + +func containsOrg(orgs []string, name string) bool { + name = strings.ToLower(name) + for _, o := range orgs { + if strings.ToLower(o) == name { + return true + } + } + return false +} diff --git a/auth/handler.go b/auth/handler.go new file mode 100644 index 0000000000..aa55983cee --- /dev/null +++ b/auth/handler.go @@ -0,0 +1,595 @@ +package auth + +import ( + "bytes" + "context" + "database/sql" + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" + "github.com/target/goalert/config" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/keyring" + "github.com/target/goalert/permission" + "github.com/target/goalert/user" + "github.com/target/goalert/util" + "github.com/target/goalert/util/errutil" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "go.opencensus.io/trace" +) + +// CookieName is the name of the auth session cookie. +const CookieName = "goalert_session.2" +const v1CookieName = "goalert_session" + +type registeredProvider struct { + // ID is the unique identifier of the provider. + ID string + + // URL is the location of the form action (POST). + URL string + + ProviderInfo +} + +// HandlerConfig provides configuration for the auth handler. +type HandlerConfig struct { + UserStore user.Store + SessionKeyring keyring.Keyring + IntKeyStore integrationkey.Store +} + +// Handler will serve authentication requests for registered identity providers. 
+type Handler struct { + providers map[string]IdentityProvider + cfg HandlerConfig + + db *sql.DB + userLookup *sql.Stmt + addSubject *sql.Stmt + updateUA *sql.Stmt + + startSession *sql.Stmt + fetchSession *sql.Stmt + endSession *sql.Stmt +} + +// NewHandler creates a new Handler using the provided config. +func NewHandler(ctx context.Context, db *sql.DB, cfg HandlerConfig) (*Handler, error) { + p := &util.Prepare{ + DB: db, + Ctx: ctx, + } + + h := &Handler{ + providers: make(map[string]IdentityProvider), + db: db, + + cfg: cfg, + + userLookup: p.P(` + select user_id + from auth_subjects + where + provider_id = $1 and + subject_id = $2 + `), + addSubject: p.P(` + insert into auth_subjects (provider_id, subject_id, user_id) + values ($1, $2, $3) + `), + startSession: p.P(` + insert into auth_user_sessions (id, user_agent, user_id) + values ($1, $2, $3) + `), + endSession: p.P(` + delete from auth_user_sessions + where id = $1 + `), + + updateUA: p.P(` + update auth_user_sessions + set user_agent = $2 + where id = $1 + `), + + fetchSession: p.P(` + select sess.user_id, u.role + from auth_user_sessions sess + join users u on u.id = sess.user_id + where sess.id = $1 + `), + } + + return h, p.Err +} + +// ServeLogout will clear the current session cookie and end the session (if any). +func (h *Handler) ServeLogout(w http.ResponseWriter, req *http.Request) { + h.setSessionCookie(w, req, "") + ctx := req.Context() + src := permission.Source(ctx) + if src != nil && src.Type == permission.SourceTypeAuthProvider { + _, err := h.endSession.ExecContext(context.Background(), src.ID) + if err != nil { + log.Log(ctx, errors.Wrap(err, "end session")) + } + } +} + +// ServeProviders will return a list of the currently enabled identity providers. 
+func (h *Handler) ServeProviders(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + ctx := req.Context() + info := make([]registeredProvider, 0, len(h.providers)) + + for id, p := range h.providers { + if !p.Info(ctx).Enabled { + continue + } + + info = append(info, registeredProvider{ + ID: id, + URL: "/api/v2/identity/providers/" + url.PathEscape(id), + ProviderInfo: p.Info(ctx), + }) + } + + sort.Slice(info, func(i, j int) bool { return info[i].ID < info[j].ID }) + data, err := json.Marshal(info) + if errutil.HTTPError(req.Context(), w, err) { + return + } + w.Write(data) +} + +// IdentityProviderHandler will return a handler for the given provider ID. +// +// It panics if the id has not been registered with AddIdentityProvider. +func (h *Handler) IdentityProviderHandler(id string) http.HandlerFunc { + p, ok := h.providers[id] + if !ok { + panic("IdentityProvider " + id + " does not exist") + } + + return func(w http.ResponseWriter, req *http.Request) { + ctx, sp := trace.StartSpan(req.Context(), "Auth.Provider/"+id) + defer sp.End() + + req = req.WithContext(ctx) + + var refU *url.URL + if req.Method == "POST" { + var ok bool + refU, ok = h.refererURL(w, req) + if !ok { + errutil.HTTPError(ctx, w, validation.NewFieldError("referer", "failed to resolve referer")) + return + } + } else { + c, err := req.Cookie("login_redir") + if err != nil { + errutil.HTTPError(ctx, w, validation.NewFieldError("login_redir", err.Error())) + return + } + refU, err = url.Parse(c.Value) + if err != nil { + errutil.HTTPError(ctx, w, validation.NewFieldError("login_redir", err.Error())) + return + } + } + + info := p.Info(ctx) + if !info.Enabled { + err := Error(info.Title + " auth disabled") + q := refU.Query() + sp.Annotate([]trace.Attribute{trace.BoolAttribute("error", true)}, "error: "+err.Error()) + q.Set("login_error", err.Error()) + refU.RawQuery = q.Encode() + http.Redirect(w, req, refU.String(), http.StatusFound) + return + } + 
+ if req.Method == "POST" { + h.serveProviderPost(id, p, refU, w, req) + return + } + + h.handleProvider(id, p, refU, w, req) + } +} + +// A Redirector provides a target URL for redirecting a user. +type Redirector interface { + RedirectURL() string +} + +// RedirectURL is a convenience type that can be returned as an error +// resulting in redirection. It implements the error and Redirector interfaces. +type RedirectURL string + +// An Error can be returned to indicate an error message that should be displayed to +// the user attempting to authenticate. +type Error string + +// ClientError indicates an error meant for the client to see. +func (Error) ClientError() bool { return true } + +func (a Error) Error() string { return string(a) } + +func (RedirectURL) Error() string { return "must redirect to acquire identity" } + +// RedirectURL implements the Redirector interface. +func (r RedirectURL) RedirectURL() string { return string(r) } + +func (h *Handler) canCreateUser(ctx context.Context, providerID string) bool { + cfg := config.FromContext(ctx) + switch providerID { + case "oidc": + return cfg.OIDC.NewUsers + case "github": + return cfg.GitHub.NewUsers + } + + return false +} + +func (h *Handler) handleProvider(id string, p IdentityProvider, refU *url.URL, w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + sp := trace.FromContext(ctx) + cfg := config.FromContext(ctx) + + var route RouteInfo + route.RelativePath = strings.TrimPrefix(req.URL.Path, "/v1/identity/providers/"+id) + route.RelativePath = strings.TrimPrefix(route.RelativePath, "/api/v2/identity/providers/"+id) + if route.RelativePath == "" { + route.RelativePath = "/" + } + + urlBase := cfg.AuthReferer(refU.String()) + route.CurrentURL = strings.TrimSuffix(urlBase, "/") + req.URL.Path + + sub, err := p.ExtractIdentity(&route, w, req) + if r, ok := err.(Redirector); ok { + sp.Annotate([]trace.Attribute{trace.StringAttribute("auth.redirectURL", r.RedirectURL())}, "Redirected.") + 
http.Redirect(w, req, r.RedirectURL(), http.StatusFound) + return + } + noRedirect := req.FormValue("noRedirect") == "1" + + errRedirect := func(err error) { + q := refU.Query() + sp.Annotate([]trace.Attribute{trace.BoolAttribute("error", true)}, "error: "+err.Error()) + old := err + _, err = errutil.ScrubError(err) + if err != old { + log.Log(ctx, old) + } + q.Set("login_error", err.Error()) + refU.RawQuery = q.Encode() + if noRedirect { + if err != old { + w.WriteHeader(500) + } else { + w.WriteHeader(400) + } + io.WriteString(w, err.Error()) + return + } + http.Redirect(w, req, refU.String(), http.StatusFound) + } + + if err != nil { + errRedirect(err) + return + } + + var userID string + err = h.userLookup.QueryRowContext(ctx, id, sub.SubjectID).Scan(&userID) + if err == sql.ErrNoRows { + err = nil + } + if err != nil { + errRedirect(err) + return + } + + var newUser bool + if userID == "" { + newUser = true + + if !h.canCreateUser(ctx, id) { + errRedirect(Error("New users are not allowed right now, but you can try again later.")) + log.Log(ctx, errors.New("create user: disabled for provider")) + return + } + + // create user + tx, err := h.db.BeginTx(ctx, nil) + if err != nil { + errRedirect(err) + return + } + defer tx.Rollback() + u := &user.User{ + Role: permission.RoleUser, + Name: validate.SanitizeName(sub.Name), + Email: validate.SanitizeEmail(sub.Email), + } + permission.SudoContext(ctx, func(ctx context.Context) { + u, err = h.cfg.UserStore.InsertTx(ctx, tx, u) + }) + if err != nil { + errRedirect(err) + return + } + _, err = tx.Stmt(h.addSubject).ExecContext(ctx, id, sub.SubjectID, u.ID) + userID = u.ID + if err != nil { + errRedirect(err) + return + } + err = tx.Commit() + if err != nil { + errRedirect(err) + return + } + sp.Annotate([]trace.Attribute{ + trace.BoolAttribute("user.new", true), + trace.StringAttribute("user.name", u.Name), + trace.StringAttribute("user.email", u.Email), + trace.StringAttribute("user.id", u.ID), + }, "Created new 
user.") + } + + sessToken, sessID, err := h.CreateSession(ctx, req.UserAgent(), userID) + if err != nil { + errRedirect(err) + return + } + + sp.Annotate([]trace.Attribute{ + trace.BoolAttribute("auth.login", true), + trace.StringAttribute("auth.userID", userID), + trace.StringAttribute("auth.sessionID", sessID), + }, "User authenticated.") + + if noRedirect { + io.WriteString(w, sessToken) + return + } + + h.setSessionCookie(w, req, sessToken) + + if newUser { + q := refU.Query() + q.Set("isFirstLogin", "1") + refU.RawQuery = q.Encode() + } + + http.Redirect(w, req, refU.String(), http.StatusFound) +} + +// CreateSession will start a new session for the given UserID, returning a newly signed token. +func (h *Handler) CreateSession(ctx context.Context, userAgent, userID string) (token, id string, err error) { + sessID := uuid.NewV4() + _, err = h.startSession.ExecContext(ctx, sessID.String(), userAgent, userID) + if err != nil { + return "", "", err + } + + var buf bytes.Buffer + buf.WriteByte('S') // session IDs will be prefixed with an "S" + buf.Write(sessID.Bytes()) + sig, err := h.cfg.SessionKeyring.Sign(buf.Bytes()) + if err != nil { + return "", "", err + } + buf.Write(sig) + + return base64.URLEncoding.EncodeToString(buf.Bytes()), sessID.String(), nil +} + +func (h *Handler) setSessionCookie(w http.ResponseWriter, req *http.Request, val string) { + ClearCookie(w, req, "login_redir") + if val == "" { + ClearCookie(w, req, CookieName) + } else { + SetCookieAge(w, req, CookieName, val, 30*24*time.Hour) + } +} + +func (h *Handler) authWithToken(w http.ResponseWriter, req *http.Request, next http.Handler) bool { + tok := GetToken(req) + if tok == "" { + return false + } + + // TODO: update once scopes are implemented + ctx := req.Context() + var err error + switch req.URL.Path { + case "/v1/api/alerts", "/api/v2/generic/incoming": + ctx, err = h.cfg.IntKeyStore.Authorize(ctx, tok, integrationkey.TypeGeneric) + case "/v1/webhooks/grafana", 
"/api/v2/grafana/incoming": + ctx, err = h.cfg.IntKeyStore.Authorize(ctx, tok, integrationkey.TypeGrafana) + default: + return false + } + + if errutil.HTTPError(req.Context(), w, err) { + return true + } + + next.ServeHTTP(w, req.WithContext(ctx)) + return true +} + +// WrapHandler will wrap an existing http.Handler so the Context of the request +// includes authentication information (if the request is authorized). +// +// Updating and clearing the session cookie is automatically handled. +func (h *Handler) WrapHandler(wrapped http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if h.authWithToken(w, req, wrapped) { + return + } + + // User session flow + ctx := req.Context() + tok := GetToken(req) + var fromCookie bool + if tok == "" { + c, err := req.Cookie(CookieName) + if err == nil { + fromCookie = true + tok = c.Value + } + } + if tok == "" { + c, err := req.Cookie(v1CookieName) + if err == nil { + fromCookie = true + tok = c.Value + } + } + + if tok == "" { + // no cookie value + wrapped.ServeHTTP(w, req) + return + } + data, err := base64.URLEncoding.DecodeString(tok) + if err != nil { + if fromCookie { + h.setSessionCookie(w, req, "") + } + wrapped.ServeHTTP(w, req) + return + } + if len(data) == 0 || data[0] != 'S' || len(data) < 17 { + if fromCookie { + h.setSessionCookie(w, req, "") + } + wrapped.ServeHTTP(w, req) + return + } + + id, err := uuid.FromBytes(data[1:17]) + if err != nil { + if fromCookie { + h.setSessionCookie(w, req, "") + } + wrapped.ServeHTTP(w, req) + return + } + + valid, isOld := h.cfg.SessionKeyring.Verify(data[:17], data[17:]) + if !valid { + if fromCookie { + h.setSessionCookie(w, req, "") + } + wrapped.ServeHTTP(w, req) + return + } + if fromCookie && isOld { + // send new signature back if it was signed with an old key + sig, err := h.cfg.SessionKeyring.Sign(data[:17]) + if err != nil { + log.Log(ctx, errors.Wrap(err, "failed to sign/issue new session token")) + } else { + 
data = append(data[:17], sig...) + h.setSessionCookie(w, req, base64.URLEncoding.EncodeToString(data)) + + _, err = h.updateUA.ExecContext(ctx, id.String(), req.UserAgent()) + if err != nil { + log.Log(ctx, errors.Wrap(err, "update user agent (session key refresh)")) + } + } + } else if fromCookie { + // compat, always set cookie (for transition from /v1 to /api) + h.setSessionCookie(w, req, tok) + } + + var userID string + var userRole permission.Role + err = h.fetchSession.QueryRowContext(ctx, id.String()).Scan(&userID, &userRole) + if err == sql.ErrNoRows { + if fromCookie { + h.setSessionCookie(w, req, "") + } + wrapped.ServeHTTP(w, req) + return + } + if err != nil { + errutil.HTTPError(ctx, w, err) + return + } + + ctx = permission.UserSourceContext( + ctx, + userID, + userRole, + &permission.SourceInfo{ + Type: permission.SourceTypeAuthProvider, + ID: id.String(), + }, + ) + req = req.WithContext(ctx) + + wrapped.ServeHTTP(w, req) + }) +} + +func (h *Handler) refererURL(w http.ResponseWriter, req *http.Request) (*url.URL, bool) { + ref := req.Header.Get("referer") + ctx := req.Context() + cfg := config.FromContext(ctx) + refU, err := url.Parse(ref) + if err != nil { + errutil.HTTPError(ctx, w, validation.NewFieldError("referer", err.Error())) + return nil, false + } + + if cfg.AuthReferer(ref) == "" { + err := validation.NewFieldError("referer", "wrong host/path") + ctx = log.WithFields(ctx, log.Fields{ + "AuthRefererURLs": cfg.Auth.RefererURLs, + "PublicURL": cfg.PublicURL(), + }) + log.Log(ctx, err) + errutil.HTTPError(ctx, w, err) + return nil, false + } + + q := refU.Query() // reset existing login params + q.Del("isFirstLogin") + q.Del("login_error") + refU.RawQuery = q.Encode() + return refU, true +} +func (h *Handler) serveProviderPost(id string, p IdentityProvider, refU *url.URL, w http.ResponseWriter, req *http.Request) { + SetCookie(w, req, "login_redir", refU.String()) + + h.handleProvider(id, p, refU, w, req) +} + +// AddIdentityProvider 
registers a new IdentityProvider with the given ID. +func (h *Handler) AddIdentityProvider(id string, idp IdentityProvider) error { + if h.providers[id] != nil { + return errors.Errorf("provider already exists with id '%s'", id) + } + + h.providers[id] = idp + return nil +} diff --git a/auth/identityprovider.go b/auth/identityprovider.go new file mode 100644 index 0000000000..a142d12020 --- /dev/null +++ b/auth/identityprovider.go @@ -0,0 +1,62 @@ +package auth + +import ( + "context" + "net/http" +) + +// An IdentityProvider provides an option for a user to login (identify themselves). +// +// Examples include user/pass, OIDC, LDAP, etc.. +type IdentityProvider interface { + Info(context.Context) ProviderInfo + + ExtractIdentity(*RouteInfo, http.ResponseWriter, *http.Request) (*Identity, error) +} + +// Identity represents a user's proven identity. +type Identity struct { + // SubjectID should be a provider-specific identifier for an individual. + SubjectID string + Email string + EmailVerified bool + Name string +} + +// ProviderInfo holds the details for using a provider. +type ProviderInfo struct { + // Title is a user-viewable string for identifying this provider. + Title string + + // LogoURL is the optional URL of an icon to display with the provider. + LogoURL string `json:",omitempty"` + + // Fields holds a list of fields to include with the request. + // The order specified is the order displayed. + Fields []Field `json:",omitempty"` + + // Hidden indicates that the provider is not intended for user visibility. + Hidden bool `json:"-"` + + // Enabled indicates that the provider is currently turned on. + Enabled bool `json:"-"` +} + +// Field represents a single form field for authentication. +type Field struct { + // ID is the unique name/identifier of the field. + // It will be used as the key name in the POST request. + ID string + + // Label is the text displayed to the user for the field. 
+ Label string + + // Required indicates a field that must not be empty. + Required bool + + // Password indicates the field should be treated as a password (generally masked). + Password bool + + // Scannable indicates the field can be entered via QR-code scan. + Scannable bool +} diff --git a/auth/nonce/nonce.go b/auth/nonce/nonce.go new file mode 100644 index 0000000000..965717d5c2 --- /dev/null +++ b/auth/nonce/nonce.go @@ -0,0 +1,105 @@ +package nonce + +import ( + "context" + "database/sql" + "github.com/target/goalert/util" + "github.com/target/goalert/util/log" + "time" + + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" +) + +// Store allows generating and consuming nonce values. +type Store interface { + New() [16]byte + Consume(context.Context, [16]byte) (bool, error) + Shutdown(context.Context) error +} + +// DB implements the Store interface using postgres as its backend. +type DB struct { + db *sql.DB + shutdown chan context.Context + + consume *sql.Stmt + cleanup *sql.Stmt +} + +// NewDB prepares a new DB instance for the given sql.DB. +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + d := &DB{ + db: db, + shutdown: make(chan context.Context), + + consume: p.P(` + insert into auth_nonce (id) + values ($1) + on conflict do nothing + `), + cleanup: p.P(` + delete from auth_nonce + where created_at < now() - '1 week'::interval + `), + } + if p.Err != nil { + return nil, p.Err + } + go d.loop() + + return d, nil +} + +func (db *DB) loop() { + defer close(db.shutdown) + t := time.NewTicker(time.Hour * 24) + defer t.Stop() + for { + select { + case <-t.C: + _, err := db.cleanup.ExecContext(context.Background()) + if err != nil { + log.Log(context.Background(), errors.Wrap(err, "cleanup old nonce values")) + } + case <-db.shutdown: + return + } + } +} + +// Shutdown allows gracefully shutting down the nonce store. 
+func (db *DB) Shutdown(ctx context.Context) error { + if db == nil { + return nil + } + db.shutdown <- ctx + + // wait for it to complete + <-db.shutdown + return nil +} + +// New will generate a new cryptographically random nonce value. +func (db *DB) New() (id [16]byte) { + copy(id[:], uuid.NewV4().Bytes()) + return id +} + +// Consume will record the use of a nonce value. +// +// An error is returned if it is not possible to validate the nonce value. +// Otherwise true/false is returned to indicate if the id is valid. +// +// The first call to Consume for a given ID will return true, subsequent calls +// for the same ID will return false. +func (db *DB) Consume(ctx context.Context, id [16]byte) (bool, error) { + res, err := db.consume.ExecContext(ctx, uuid.UUID(id).String()) + if err != nil { + return false, err + } + n, _ := res.RowsAffected() + return n == 1, nil +} diff --git a/auth/oidc/config.go b/auth/oidc/config.go new file mode 100644 index 0000000000..d9d0986b66 --- /dev/null +++ b/auth/oidc/config.go @@ -0,0 +1,12 @@ +package oidc + +import ( + "github.com/target/goalert/auth/nonce" + "github.com/target/goalert/keyring" +) + +// Config provides necessary parameters for OIDC authentication. 
+type Config struct { + Keyring keyring.Keyring + NonceStore nonce.Store +} diff --git a/auth/oidc/identityprovider.go b/auth/oidc/identityprovider.go new file mode 100644 index 0000000000..93ce0db948 --- /dev/null +++ b/auth/oidc/identityprovider.go @@ -0,0 +1,287 @@ +package oidc + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "fmt" + "net/http" + "strings" + "sync" + "time" + + oidc "github.com/coreos/go-oidc" + "github.com/pkg/errors" + "github.com/target/goalert/auth" + "github.com/target/goalert/config" + "github.com/target/goalert/util/log" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/trace" + "golang.org/x/oauth2" +) + +var _ auth.IdentityProvider = &Provider{} + +const nonceCookieName = "goalert_oidc_nonce" + +var b64enc = base64.URLEncoding.WithPadding(base64.NoPadding) + +// Provider implements the auth.IdentityProvider interface by acting as a relying-party +// to a standard OIDC server. +type Provider struct { + cfg Config + + mx sync.Mutex + providers map[string]*oidc.Provider +} + +func (p *Provider) provider(ctx context.Context) (*oidc.Provider, error) { + cfg := config.FromContext(ctx) + p.mx.Lock() + defer p.mx.Unlock() + + provider, ok := p.providers[cfg.OIDC.IssuerURL] + if ok { + return provider, nil + } + + // oidc keeps the context and uses it after auto-discover is complete. + // Giving it context.Background is a workaround to allow fetching keys + // after init. 
+ oidcCtx, sp := trace.StartSpanWithRemoteParent(context.Background(), "Auth.OIDC.NewProvider", trace.FromContext(ctx).SpanContext()) + provider, err := oidc.NewProvider(oidc.ClientContext(oidcCtx, &http.Client{Transport: &ochttp.Transport{}}), cfg.OIDC.IssuerURL) + sp.End() + if err != nil { + return nil, err + } + + p.providers[cfg.OIDC.IssuerURL] = provider + return provider, nil +} +func (p *Provider) oaConfig(ctx context.Context) (*oauth2.Config, *oidc.IDTokenVerifier, error) { + provider, err := p.provider(ctx) + if err != nil { + return nil, nil, err + } + cfg := config.FromContext(ctx) + + return &oauth2.Config{ + ClientID: cfg.OIDC.ClientID, + ClientSecret: cfg.OIDC.ClientSecret, + + Endpoint: provider.Endpoint(), + + // "openid" is a required scope for OpenID Connect flows. + Scopes: []string{oidc.ScopeOpenID, "profile", "email"}, + }, provider.Verifier(&oidc.Config{ClientID: cfg.OIDC.ClientID}), nil +} + +// NewProvider prepares a new Provider with the given config. +func NewProvider(ctx context.Context, cfg Config) (*Provider, error) { + if cfg.Keyring == nil { + return nil, errors.New("Keyring missing") + } + if cfg.NonceStore == nil { + return nil, errors.New("NonceStore missing") + } + + return &Provider{ + cfg: cfg, + providers: make(map[string]*oidc.Provider), + }, nil +} + +// Info returns the appropriate auth.ProviderInfo based on configuration. +// +// As OIDC requires no user input, only the Title is provided. 
+func (p *Provider) Info(ctx context.Context) auth.ProviderInfo { + cfg := config.FromContext(ctx) + title := "OIDC" + if cfg.OIDC.OverrideName != "" { + title = cfg.OIDC.OverrideName + } + return auth.ProviderInfo{ + Title: title, + Enabled: cfg.OIDC.Enable, + } +} + +func (p *Provider) newStateToken(nonceBytes []byte) (state string, err error) { + buf := bytes.NewBuffer(nil) + + buf.Write(nonceBytes[:]) + buf.WriteByte('N') + binary.Write(buf, binary.BigEndian, time.Now().Unix()) + + sig, err := p.cfg.Keyring.Sign(buf.Bytes()) + if err != nil { + return "", err + } + buf.Write(sig) + + // skip nonce for state token + buf.Next(len(nonceBytes)) + + return b64enc.EncodeToString(buf.Bytes()), nil +} + +func (p *Provider) validateStateToken(ctx context.Context, nonce []byte, state string) (bool, error) { + var buf bytes.Buffer + buf.Write(nonce[:]) + data, err := b64enc.DecodeString(state) + if err != nil { + return false, err + } + buf.Write(data) + data = buf.Bytes() + if len(data) < 25 { + return false, nil + } + valid, _ := p.cfg.Keyring.Verify(data[:25], data[25:]) + if !valid { + return false, nil + } + if data[16] != 'N' { + return false, nil + } + var id [16]byte + copy(id[:], data[1:]) + + unix := int64(binary.BigEndian.Uint64(data[17:])) + t := time.Unix(unix, 0) + if time.Since(t) > time.Hour { + return false, nil + } + if time.Until(t) > time.Minute*5 { + // too far in the future (clock drift) + return false, nil + } + + return true, nil +} + +// ExtractIdentity will return a redirect error for new auth requests, and provide a users identity +// for callback requests. 
+func (p *Provider) ExtractIdentity(route *auth.RouteInfo, w http.ResponseWriter, req *http.Request) (*auth.Identity, error) { + ctx := req.Context() + cfg := config.FromContext(ctx) + + name := "OIDC" + if cfg.OIDC.OverrideName != "" { + name = cfg.OIDC.OverrideName + } + + switch route.RelativePath { + case "/": + nonce := p.cfg.NonceStore.New() + stateToken, err := p.newStateToken(nonce[:]) + if err != nil { + log.Log(req.Context(), errors.Wrap(err, "generate new state token")) + return nil, auth.Error("Failed to generate state token.") + } + nonceStr := b64enc.EncodeToString(nonce[:]) + auth.SetCookie(w, req, nonceCookieName, nonceStr) + + oaCfg, _, err := p.oaConfig(ctx) + if err != nil { + return nil, err + } + oaCfg.RedirectURL = route.CurrentURL + "/callback" + + u := oaCfg.AuthCodeURL(stateToken, oidc.Nonce(nonceStr)) + return nil, auth.RedirectURL(u) + case "/callback": + // handled below + default: + return nil, auth.Error(fmt.Sprintf("Could not login due to wrong configuration for %s.", name)) + } + + stateToken := req.FormValue("state") + nonceC, err := req.Cookie(nonceCookieName) + if err != nil { + return nil, auth.Error("There was a problem recognizing this browser. You can try again") + } + auth.ClearCookie(w, req, nonceCookieName) + + nonce, err := b64enc.DecodeString(nonceC.Value) + if err != nil || len(nonce) != 16 { + // We can't guarantee the current browser is the one we sent for auth (CSRF/XSS potential) + return nil, auth.Error("There was a problem verifying this browser. You can try again") + } + valid, err := p.validateStateToken(req.Context(), nonce, stateToken) + if err != nil { + log.Log(req.Context(), errors.Wrap(err, "validate state token")) + return nil, auth.Error("There was a redirection problem. You can try again") + } + if !valid { + return nil, auth.Error("There was a problem while checking the request. 
You can try again") + } + + oaCfg, verifier, err := p.oaConfig(ctx) + if err != nil { + return nil, err + } + oaCfg.RedirectURL = route.CurrentURL + + oauth2Token, err := oaCfg.Exchange(ctx, req.URL.Query().Get("code")) + if err != nil { + log.Log(ctx, errors.Wrap(err, "exchange OIDC token")) + return nil, auth.Error(fmt.Sprintf("Could not communicate with %s server. You can try again", name)) + } + + // Extract the ID Token from OAuth2 token. + rawIDToken, ok := oauth2Token.Extra("id_token").(string) + if !ok { + log.Log(ctx, errors.New("id_token missing")) + return nil, auth.Error(fmt.Sprintf("Bad response from %s server.", name)) + } + + // Parse and verify ID Token payload. + idToken, err := verifier.Verify(ctx, rawIDToken) + if err != nil { + log.Log(ctx, errors.Wrap(err, "validate id_token")) + return nil, auth.Error(fmt.Sprintf("Invalid response from %s server.", name)) + } + + remoteNonce, err := b64enc.DecodeString(idToken.Nonce) + if err != nil || len(remoteNonce) != 16 || !bytes.Equal(remoteNonce, nonce) { + return nil, auth.Error(fmt.Sprintf("Invalid nonce from %s server.", name)) + } + var remoteNonceBytes [16]byte + copy(remoteNonceBytes[:], remoteNonce) + + ok, err = p.cfg.NonceStore.Consume(ctx, remoteNonceBytes) + if err != nil { + log.Log(ctx, errors.Wrap(err, "consume nonce value")) + return nil, auth.Error("Could not login. You can try again") + } + if !ok { + return nil, auth.Error("Could not login. 
You can try again") + } + + // Extract custom claims + var claims struct { + Name string `json:"name"` + GivenName string `json:"given_name"` + FamilyName string `json:"family_name"` + Email string `json:"email"` + Verified bool `json:"email_verified"` + } + if err := idToken.Claims(&claims); err != nil { + log.Log(ctx, errors.Wrap(err, "parse claims")) + return nil, auth.Error(fmt.Sprintf("Invalid response from %s server.", name)) + } + if claims.Name == "" { + // We *should* always get name with the profile scope, but fall back to joining the given and family names + // for misbehaving servers. + claims.Name = strings.TrimSpace(claims.GivenName + " " + claims.FamilyName) + } + + return &auth.Identity{ + Email: claims.Email, + Name: claims.Name, + EmailVerified: claims.Verified, + SubjectID: idToken.Subject, + }, nil +} diff --git a/auth/routeinfo.go b/auth/routeinfo.go new file mode 100644 index 0000000000..568ed9b667 --- /dev/null +++ b/auth/routeinfo.go @@ -0,0 +1,13 @@ +package auth + +// RouteInfo represents path information for the current request. +type RouteInfo struct { + // Relative provides a path, relative to the base of the current + // identity provider. + RelativePath string + + // CurrentURL is calculated using the AuthRefererURLs and + // the current auth attempt's referer. It does not include + // query parameters of the current request. 
+ CurrentURL string +} diff --git a/cmd/goalert/main.go b/cmd/goalert/main.go new file mode 100644 index 0000000000..d1c95afbb6 --- /dev/null +++ b/cmd/goalert/main.go @@ -0,0 +1,18 @@ +package main + +import ( + "context" + "github.com/target/goalert/app" + "github.com/target/goalert/util/log" + "os" + + _ "github.com/joho/godotenv/autoload" +) + +func main() { + err := app.RootCmd.Execute() + if err != nil { + log.Log(context.TODO(), err) + os.Exit(1) + } +} diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000000..e46e7bb649 --- /dev/null +++ b/config/config.go @@ -0,0 +1,310 @@ +package config + +import ( + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + + "github.com/pkg/errors" +) + +// SchemaVersion indicates the current config struct version. +const SchemaVersion = 1 + +// Config contains GoAlert application settings. +type Config struct { + data []byte + fallbackURL string + + General struct { + PublicURL string `info:"Publicly routable URL for UI links and API calls."` + GoogleAnalyticsID string `public:"true"` + NotificationDisclaimer string `public:"true" info:"Disclaimer text for receiving pre-recorded notifications (appears on profile page)."` + DisableLabelCreation bool `public:"true" info:"Disables the ability to create new labels for services."` + } + + Auth struct { + RefererURLs []string `info:"Allowed referer URLs for auth and redirects."` + DisableBasic bool `public:"true" info:"Disallow username/password login."` + } + + GitHub struct { + Enable bool `public:"true" info:"Enable GitHub authentication."` + + NewUsers bool `info:"Allow new user creation via GitHub authentication."` + + ClientID string + ClientSecret string `password:"true"` + + AllowedUsers []string `info:"Allow any of the listed GitHub usernames to authenticate. 
Use '*' to allow any user."` + AllowedOrgs []string `info:"Allow any member of any listed GitHub org (or team, using the format 'org/team') to authenticate."` + + EnterpriseURL string `info:"GitHub URL (without /api) when used with GitHub Enterprise."` + } + + OIDC struct { + Enable bool `public:"true" info:"Enable OpenID Connect authentication."` + + NewUsers bool `info:"Allow new user creation via OIDC authentication."` + OverrideName string `info:"Set the name/label on the login page to something other than OIDC."` + + IssuerURL string + ClientID string + ClientSecret string `password:"true"` + } + + Mailgun struct { + Enable bool `public:"true"` + + APIKey string `password:"true"` + EmailDomain string `info:"The TO address for all incoming alerts."` + } + + Slack struct { + Enable bool `public:"true"` + + ClientID string + ClientSecret string `password:"true"` + + AccessToken string `password:"true" info:"Slack app OAuth access token."` + } + + Twilio struct { + Enable bool `public:"true" info:"Enables sending and processing of Voice and SMS messages through the Twilio notification provider."` + + AccountSID string + AuthToken string `password:"true" info:"The primary Auth Token for Twilio. Must be primary (not secondary) for request valiation."` + FromNumber string `public:"true" info:"The Twilio number to use for outgoing notifications."` + } + + Feedback struct { + Enable bool `public:"true" info:"Enables Feedback link in nav bar."` + OverrideURL string `public:"true" info:"Use a custom URL for Feedback link in nav bar."` + } +} + +// CallbackURL will return a public-routable URL to the given path. +// It will use PublicURL() to fill in missing pieces. +// +// It will panic if provided an invalid URL. 
+func (cfg Config) CallbackURL(path string, mergeParams ...url.Values) string { + base, err := url.Parse(cfg.PublicURL()) + if err != nil { + panic(errors.Wrap(err, "parse PublicURL")) + } + + next, err := url.Parse(path) + if err != nil { + panic(errors.Wrap(err, "parse path")) + } + + base.Path = strings.TrimSuffix(base.Path, "/") + "/" + strings.TrimPrefix(next.Path, "/") + + params := base.Query() + nx := next.Query() + // set/override any params provided with path + for name, val := range nx { + params[name] = val + } + + // set/override with any additionally provided params + for _, merge := range mergeParams { + for name, val := range merge { + params[name] = val + } + } + + base.RawQuery = params.Encode() + return base.String() +} + +// AuthSecure returns true if the auth referer URLs are HTTPS. +func (cfg Config) AuthSecure() bool { + return strings.HasPrefix(cfg.authReferers()[0], "https://") +} + +// PublicURL will return the General.PublicURL or a fallback address (i.e. the app listening port). +func (cfg Config) PublicURL() string { + if cfg.General.PublicURL == "" { + return strings.TrimSuffix(cfg.fallbackURL, "/") + } + + return strings.TrimSuffix(cfg.General.PublicURL, "/") +} + +func (cfg Config) authReferers() []string { + if len(cfg.Auth.RefererURLs) == 0 { + return []string{cfg.PublicURL()} + } + return cfg.Auth.RefererURLs +} + +// AuthReferer will return the configured referer URL matching ref. If there +// are no matches, an empty string is returned. 
+func (cfg Config) AuthReferer(ref string) string { + for _, u := range cfg.authReferers() { + if strings.HasPrefix(ref, u) { + return u + } + } + + return "" +} + +var ( + mailgunKeyRx = regexp.MustCompile(`^key-[0-9a-f]{32}$`) + + slackClientIDRx = regexp.MustCompile(`^[0-9]{12}\.[0-9]{12}$`) + slackClientSecretRx = regexp.MustCompile(`^[0-9a-f]{32}$`) + slackUserAccessTokenRx = regexp.MustCompile(`^xoxp-[0-9]{12}-[0-9]{12}-[0-9]{12}-[0-9a-f]{32}$`) + slackBotAccessTokenRx = regexp.MustCompile(`^xoxb-[0-9]{12}-[0-9]{12}-[0-9A-Za-z]{24}$`) + + twilioAccountSIDRx = regexp.MustCompile(`^AC[0-9a-f]{32}$`) + twilioAuthTokenRx = regexp.MustCompile(`^[0-9a-f]{32}$`) + + githubClientIDRx = regexp.MustCompile(`^[0-9a-f]{20}$`) + githubClientSecretRx = regexp.MustCompile(`^[0-9a-f]{40}$`) +) + +func validateRx(field string, rx *regexp.Regexp, value, msg string) error { + if value == "" { + return nil + } + + if !rx.MatchString(value) { + return validation.NewFieldError(field, msg) + } + + return nil +} +func validateEnable(prefix string, isEnabled bool, vals ...string) error { + if !isEnabled { + return nil + } + + var err error + for i := 0; i < len(vals); i += 2 { + if vals[i+1] != "" { + continue + } + err = validate.Many( + err, + validation.NewFieldError(prefix+".Enable", fmt.Sprintf("requires %s.%s to be set ", prefix, vals[i])), + validation.NewFieldError(fmt.Sprintf("%s.%s", prefix, vals[i]), "required to enable "+prefix), + ) + } + + return err +} + +// Validate will check that the Config values are valid. 
+func (cfg Config) Validate() error { + var err error + if cfg.General.PublicURL != "" { + err = validate.Many( + err, + validate.AbsoluteURL("General.PublicURL", cfg.General.PublicURL), + ) + } + + err = validate.Many( + err, + validate.Text("General.NotificationDisclaimer", cfg.General.NotificationDisclaimer, 0, 500), + + validateRx("Mailgun.APIKey", mailgunKeyRx, cfg.Mailgun.APIKey, "should be of the format: 'key-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'"), + + validateRx("Slack.ClientID", slackClientIDRx, cfg.Slack.ClientID, "should be of the format: '############.############'"), + validateRx("Slack.ClientSecret", slackClientSecretRx, cfg.Slack.ClientSecret, "should be of the format: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'"), + + validateRx("Twilio.AccountSID", twilioAccountSIDRx, cfg.Twilio.AccountSID, "should be of the format: 'ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'"), + validateRx("Twilio.AuthToken", twilioAuthTokenRx, cfg.Twilio.AuthToken, "should be of the format: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'"), + + validateRx("GitHub.ClientID", githubClientIDRx, cfg.GitHub.ClientID, "should be of the format: 'xxxxxxxxxxxxxxxxxxxx'"), + validateRx("GitHub.ClientSecret", githubClientSecretRx, cfg.GitHub.ClientSecret, "should be of the format: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'"), + ) + + if strings.HasPrefix(cfg.Slack.AccessToken, "xoxp") { + err = validate.Many(err, + validateRx("Slack.AccessToken", slackUserAccessTokenRx, cfg.Slack.AccessToken, "should be of the format: 'xoxb-############-############-zzzzzzzzzzzzzzzzzzzzzzzz'"), + ) + } else { + err = validate.Many(err, + validateRx("Slack.AccessToken", slackBotAccessTokenRx, cfg.Slack.AccessToken, "should be of the format: 'xoxb-############-############-zzzzzzzzzzzzzzzzzzzzzzzz'"), + ) + } + + if cfg.OIDC.IssuerURL != "" { + err = validate.Many(err, validate.AbsoluteURL("OIDC.IssuerURL", cfg.OIDC.IssuerURL)) + } + if cfg.GitHub.EnterpriseURL != "" { + err = validate.Many(err, validate.AbsoluteURL("GitHub.EnterpriseURL", 
cfg.GitHub.EnterpriseURL)) + } + if cfg.Twilio.FromNumber != "" { + err = validate.Many(err, validate.Phone("Twilio.FromNumber", cfg.Twilio.FromNumber)) + } + if cfg.Mailgun.EmailDomain != "" { + err = validate.Many(err, validate.Email("Mailgun.EmailDomain", "example@"+cfg.Mailgun.EmailDomain)) + } + + err = validate.Many( + err, + + validateEnable("Mailgun", cfg.Mailgun.Enable, + "APIKey", cfg.Mailgun.APIKey, + "EmailDomain", cfg.Mailgun.EmailDomain, + ), + + validateEnable("Slack", cfg.Slack.Enable, + "ClientID", cfg.Slack.ClientID, + "ClientSecret", cfg.Slack.ClientSecret, + ), + + validateEnable("Twilio", cfg.Twilio.Enable, + "AccountSID", cfg.Twilio.AccountSID, + "AuthToken", cfg.Twilio.AuthToken, + "FromNumber", cfg.Twilio.FromNumber, + ), + + validateEnable("GitHub", cfg.GitHub.Enable, + "ClientID", cfg.GitHub.ClientID, + "ClientSecret", cfg.GitHub.ClientSecret, + ), + + validateEnable("OIDC", cfg.OIDC.Enable, + "IssuerURL", cfg.OIDC.IssuerURL, + "ClientID", cfg.OIDC.ClientID, + "ClientSecret", cfg.OIDC.ClientSecret, + ), + ) + + if cfg.Feedback.OverrideURL != "" { + err = validate.Many( + err, + validate.AbsoluteURL("Feedback.OverrideURL", cfg.Feedback.OverrideURL), + ) + } + + var schema string + for i, urlStr := range cfg.Auth.RefererURLs { + field := fmt.Sprintf("Auth.RefererURLs[%d]", i) + err = validate.Many( + err, + validate.AbsoluteURL(field, urlStr), + ) + if schema == "" { + schema = strings.SplitN(urlStr, ":", 2)[0] + } else { + newSchema := strings.SplitN(urlStr, ":", 2)[0] + + if newSchema != schema { + err = validate.Many(err, validation.NewFieldError(field, "Refusing to accept both secure and insecure referers.")) + } + } + } + + return err +} diff --git a/config/context.go b/config/context.go new file mode 100644 index 0000000000..c467b467c4 --- /dev/null +++ b/config/context.go @@ -0,0 +1,19 @@ +package config + +import "context" + +type contextKey string + +var contextKeyConfig = contextKey("config") + +// Context returns a new Context 
that carries the provided Config. +func (cfg Config) Context(ctx context.Context) context.Context { + return context.WithValue(ctx, contextKeyConfig, cfg) +} + +// FromContext will return the Config carried in the provided Context. +// +// It panics if config is not available on the current context. +func FromContext(ctx context.Context) Config { + return ctx.Value(contextKeyConfig).(Config) +} diff --git a/config/mergejson.go b/config/mergejson.go new file mode 100644 index 0000000000..ae68035ac9 --- /dev/null +++ b/config/mergejson.go @@ -0,0 +1,50 @@ +package config + +import ( + "encoding/json" + "strings" + + "github.com/pkg/errors" +) + +func mergeJSON(dst, src []byte) ([]byte, error) { + var dstM, srcM map[string]interface{} + if len(dst) == 0 { + dstM = make(map[string]interface{}) + } else { + err := json.Unmarshal(dst, &dstM) + if err != nil { + return nil, err + } + } + err := json.Unmarshal(src, &srcM) + if err != nil { + return nil, err + } + + err = applyValues(dstM, srcM) + if err != nil { + return nil, err + } + + return json.Marshal(dstM) +} + +func applyValues(dst, src map[string]interface{}, prefix ...string) error { + for key, val := range src { + if valMap, ok := val.(map[string]interface{}); ok { + switch d := dst[key].(type) { + case nil: + dst[key] = valMap + case map[string]interface{}: + applyValues(d, valMap, append(prefix, key)...) 
+ default: + return errors.Errorf("schema type mismatch: expected %s.%s in DB to be map[string]interface{} but was %T", strings.Join(prefix, "."), key, d) + } + continue + } + dst[key] = val + } + + return nil +} diff --git a/config/mergejson_test.go b/config/mergejson_test.go new file mode 100644 index 0000000000..0c93b37198 --- /dev/null +++ b/config/mergejson_test.go @@ -0,0 +1,18 @@ +package config + +import "testing" + +func TestMergeJSON(t *testing.T) { + const orig = `{"foo": "bar", "bin": "baz", "d": {"e": "f"}}` + const add = `{"bin":"", "ok": "then", "a":{"b":"c"}, "d":{"e":"g"}}` + const exp = `{"a":{"b":"c"},"bin":"","d":{"e":"g"},"foo":"bar","ok":"then"}` + + data, err := mergeJSON([]byte(orig), []byte(add)) + if err != nil { + t.Fatal(err) + } + + if string(data) != exp { + t.Fatalf("got '%s'; want '%s'", string(data), exp) + } +} diff --git a/config/source.go b/config/source.go new file mode 100644 index 0000000000..94ad5848cd --- /dev/null +++ b/config/source.go @@ -0,0 +1,21 @@ +package config + +import "net/http" + +// A Source will provide a snapshot of a Config struct. +type Source interface { + Config() Config +} + +// Static implements a config.Source that never changes its values. +type Static Config + +// Config will return the current value of s. +func (s Static) Config() Config { return Config(s) } + +// Handler will return a new http.Handler that provides config to all requests. 
+func Handler(next http.Handler, src Source) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + next.ServeHTTP(w, req.WithContext(src.Config().Context(req.Context()))) + }) +} diff --git a/config/store.go b/config/store.go new file mode 100644 index 0000000000..c55048eaa5 --- /dev/null +++ b/config/store.go @@ -0,0 +1,304 @@ +package config + +import ( + "context" + cryptoRand "crypto/rand" + "database/sql" + "encoding/binary" + "encoding/json" + "fmt" + "github.com/target/goalert/keyring" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/util/errutil" + "github.com/target/goalert/util/log" + "io/ioutil" + "math/rand" + "net/http" + "sync" + "time" + + "github.com/pkg/errors" +) + +// Store handles saving and loading configuration from a postgres database. +type Store struct { + rawCfg Config + cfgVers int + fallbackURL string + mx sync.RWMutex + db *sql.DB + keys keyring.Keys + latestConfig *sql.Stmt + setConfig *sql.Stmt + lock *sql.Stmt +} + +// NewStore will create a new Store with the given parameters. It will automatically detect +// new configuration changes. 
+func NewStore(ctx context.Context, db *sql.DB, keys keyring.Keys, fallbackURL string) (*Store, error) { + p := util.Prepare{Ctx: ctx, DB: db} + + s := &Store{ + db: db, + fallbackURL: fallbackURL, + latestConfig: p.P(`select id, data, schema from config where schema <= $1 order by id desc limit 1`), + setConfig: p.P(`insert into config (id, schema, data) values (DEFAULT, $1, $2) returning (id)`), + lock: p.P(`lock config in exclusive mode`), + keys: keys, + } + if p.Err != nil { + return nil, p.Err + } + + var err error + permission.SudoContext(ctx, func(ctx context.Context) { + err = s.Reload(ctx) + }) + if err != nil { + return nil, err + } + + var seed int64 + err = binary.Read(cryptoRand.Reader, binary.BigEndian, &seed) + if err != nil { + return nil, err + } + src := rand.New(rand.NewSource( + seed, + )) + + go func() { + randDelay := func() time.Duration { + return 30*time.Second + time.Duration(src.Int63n(int64(30*time.Second))) + } + t := time.NewTimer(randDelay()) + for range t.C { + t.Reset(randDelay()) + permission.SudoContext(context.Background(), func(ctx context.Context) { + err := s.Reload(ctx) + if err != nil { + log.Log(ctx, errors.Wrap(err, "config auto-reload")) + } + }) + } + }() + + return s, nil +} + +func wrapTx(ctx context.Context, tx *sql.Tx, stmt *sql.Stmt) *sql.Stmt { + if tx == nil { + return stmt + } + + return tx.StmtContext(ctx, stmt) +} + +// Reload will re-read and update the current config state from the DB. 
+func (s *Store) Reload(ctx context.Context) error { + cfg, id, err := s.reloadTx(ctx, nil) + if err != nil { + return err + } + rawCfg := *cfg + rawCfg.fallbackURL = s.fallbackURL + + err = cfg.Validate() + if err != nil { + log.Log(ctx, errors.Wrap(err, "validate config")) + } + + s.mx.Lock() + oldVers := s.cfgVers + s.cfgVers = id + s.rawCfg = rawCfg + s.mx.Unlock() + + if oldVers != id { + log.Logf(ctx, "Loaded config version %d ", id) + } + + return nil +} + +// ServeConfig handles requests to read and write the config json. +func (s *Store) ServeConfig(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + + switch req.Method { + case "GET": + id, _, data, err := s.ConfigData(ctx, nil) + if errutil.HTTPError(ctx, w, err) { + return + } + + w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="goalert-config.%d.json"`, id)) + _, err = w.Write(data) + if errutil.HTTPError(ctx, w, err) { + return + } + case "PUT": + data, err := ioutil.ReadAll(req.Body) + if errutil.HTTPError(ctx, w, err) { + return + } + + id, err := s.SetConfigData(ctx, nil, data) + if errutil.HTTPError(ctx, w, err) { + return + } + log.Logf(ctx, "Set configuration to version %d (schema version %d)", id, SchemaVersion) + + err = s.Reload(ctx) + if errutil.HTTPError(ctx, w, err) { + return + } + + w.WriteHeader(204) + default: + http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + } +} + +// ConfigData will return the current raw config data from the DB. 
+func (s *Store) ConfigData(ctx context.Context, tx *sql.Tx) (id, schemaVersion int, data []byte, err error) { + err = permission.LimitCheckAny(ctx, permission.System, permission.Admin) + if err != nil { + return 0, 0, nil, err + } + + err = wrapTx(ctx, tx, s.latestConfig).QueryRowContext(ctx, SchemaVersion).Scan(&id, &data, &schemaVersion) + if err == sql.ErrNoRows { + return 0, SchemaVersion, []byte("{}"), nil + } + if err != nil { + return 0, 0, nil, err + } + + data, _, err = s.keys.Decrypt(data) + if err != nil { + return 0, 0, nil, errors.Wrap(err, "decrypt config") + } + + return id, schemaVersion, data, nil +} + +// SetConfigData will replace the current DB config with data. +func (s *Store) SetConfigData(ctx context.Context, tx *sql.Tx, data []byte) (int, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.Admin) + if err != nil { + return 0, err + } + + var cfg Config + err = json.Unmarshal(data, &cfg) + if err != nil { + return 0, errors.Wrap(err, "validate config") + } + + data, err = s.keys.Encrypt("CONFIG", data) + if err != nil { + return 0, errors.Wrap(err, "encrypt config") + } + + var id int + err = wrapTx(ctx, tx, s.setConfig).QueryRowContext(ctx, SchemaVersion, data).Scan(&id) + if err != nil { + return 0, err + } + + return id, nil +} + +func (s *Store) reloadTx(ctx context.Context, tx *sql.Tx) (*Config, int, error) { + id, schemaVersion, data, err := s.ConfigData(ctx, tx) + if err != nil { + return nil, 0, err + } + + var c Config + switch schemaVersion { + case 1: + err = json.Unmarshal(data, &c) + if err != nil { + return nil, 0, errors.Wrap(err, "unmarshal config") + } + default: + return nil, 0, errors.Errorf("invalid config schema version found: %d", schemaVersion) + } + + c.data = data + return &c, id, nil +} + +// UpdateConfig will update the configuration in the DB and perform an immediate reload. 
+func (s *Store) UpdateConfig(ctx context.Context, fn func(Config) (Config, error)) error { + err := permission.LimitCheckAny(ctx, permission.System, permission.Admin) + if err != nil { + return err + } + tx, err := s.db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + id, err := s.updateConfigTx(ctx, tx, fn) + if err != nil { + return err + } + err = tx.Commit() + if err != nil { + return err + } + + log.Logf(ctx, "Set configuration to version %d (schema version %d)", id, SchemaVersion) + + return s.Reload(ctx) +} + +// SetConfig will replace the configuration in the DB and perform an immediate reload. +func (s *Store) SetConfig(ctx context.Context, cfg Config) error { + return s.UpdateConfig(ctx, func(Config) (Config, error) { return cfg, nil }) +} + +func (s *Store) updateConfigTx(ctx context.Context, tx *sql.Tx, fn func(Config) (Config, error)) (int, error) { + _, err := tx.StmtContext(ctx, s.lock).ExecContext(ctx) + if err != nil { + return 0, err + } + + cfg, _, err := s.reloadTx(ctx, tx) + if err != nil { + return 0, err + } + + newCfg, err := fn(*cfg) + if err != nil { + return 0, err + } + err = newCfg.Validate() + if err != nil { + return 0, err + } + + data, err := json.Marshal(newCfg) + if err != nil { + return 0, errors.Wrap(err, "marshal config") + } + + data, err = mergeJSON(cfg.data, data) + if err != nil { + return 0, errors.Wrap(err, "merge config") + } + + return s.SetConfigData(ctx, tx, data) +} + +// Config will return the current config state. 
+func (s *Store) Config() Config { + s.mx.RLock() + cfg := s.rawCfg + s.mx.RUnlock() + return cfg +} diff --git a/dataloader/alertloader.go b/dataloader/alertloader.go new file mode 100644 index 0000000000..3eb3946932 --- /dev/null +++ b/dataloader/alertloader.go @@ -0,0 +1,89 @@ +package dataloader + +import ( + "context" + "github.com/target/goalert/alert" + "strconv" + "time" +) + +type AlertLoader struct { + alertLoader *loader + stateLoader *loader + + store alert.Store +} + +func NewAlertLoader(ctx context.Context, store alert.Store) *AlertLoader { + p := &AlertLoader{ + store: store, + } + p.alertLoader = newLoader(ctx, loaderConfig{ + Max: 100, + Delay: time.Millisecond, + IDFunc: func(v interface{}) string { return strconv.Itoa(v.(*alert.Alert).ID) }, + FetchFunc: p.fetchAlerts, + }) + p.stateLoader = newLoader(ctx, loaderConfig{ + Max: 100, + Delay: time.Millisecond, + IDFunc: func(v interface{}) string { return strconv.Itoa(v.(*alert.State).AlertID) }, + FetchFunc: p.fetchAlertsState, + }) + return p +} + +func (l *AlertLoader) FetchOneAlert(ctx context.Context, id int) (*alert.Alert, error) { + v, err := l.alertLoader.FetchOne(ctx, strconv.Itoa(id)) + if err != nil { + return nil, err + } + if v == nil { + return nil, err + } + return v.(*alert.Alert), nil +} +func (l *AlertLoader) FetchOneAlertState(ctx context.Context, alertID int) (*alert.State, error) { + v, err := l.stateLoader.FetchOne(ctx, strconv.Itoa(alertID)) + if err != nil { + return nil, err + } + if v == nil { + return nil, err + } + return v.(*alert.State), nil +} + +func (l *AlertLoader) fetchAlerts(ctx context.Context, ids []string) ([]interface{}, error) { + intIDs := make([]int, len(ids)) + for i, id := range ids { + intIDs[i], _ = strconv.Atoi(id) + } + many, err := l.store.FindMany(ctx, intIDs) + if err != nil { + return nil, err + } + + res := make([]interface{}, len(many)) + for i := range many { + res[i] = &many[i] + } + return res, nil +} + +func (l *AlertLoader) 
fetchAlertsState(ctx context.Context, ids []string) ([]interface{}, error) { + intIDs := make([]int, len(ids)) + for i, id := range ids { + intIDs[i], _ = strconv.Atoi(id) + } + many, err := l.store.State(ctx, intIDs) + if err != nil { + return nil, err + } + + res := make([]interface{}, len(many)) + for i := range many { + res[i] = &many[i] + } + return res, nil +} diff --git a/dataloader/cmloader.go b/dataloader/cmloader.go new file mode 100644 index 0000000000..1809f38049 --- /dev/null +++ b/dataloader/cmloader.go @@ -0,0 +1,52 @@ +package dataloader + +import ( + "context" + "github.com/target/goalert/user/contactmethod" + "time" +) + +// CMLoader will load user contact methods from postgres. +type CMLoader struct { + *loader + store contactmethod.Store +} + +// NewCMLoader will create a new CMLoader using the provided store for fetch operations. +func NewCMLoader(ctx context.Context, store contactmethod.Store) *CMLoader { + p := &CMLoader{ + store: store, + } + p.loader = newLoader(ctx, loaderConfig{ + Max: 100, + Delay: time.Millisecond, + IDFunc: func(v interface{}) string { return v.(*contactmethod.ContactMethod).ID }, + FetchFunc: p.fetch, + }) + return p +} + +// FetchOne will fetch a single record from the store, batching requests to the store. 
+func (l *CMLoader) FetchOne(ctx context.Context, id string) (*contactmethod.ContactMethod, error) { + v, err := l.loader.FetchOne(ctx, id) + if err != nil { + return nil, err + } + if v == nil { + return nil, err + } + return v.(*contactmethod.ContactMethod), nil +} + +func (l *CMLoader) fetch(ctx context.Context, ids []string) ([]interface{}, error) { + many, err := l.store.FindMany(ctx, ids) + if err != nil { + return nil, err + } + + res := make([]interface{}, len(many)) + for i := range many { + res[i] = &many[i] + } + return res, nil +} diff --git a/dataloader/loader.go b/dataloader/loader.go new file mode 100644 index 0000000000..a1e2e282ef --- /dev/null +++ b/dataloader/loader.go @@ -0,0 +1,226 @@ +package dataloader + +import ( + "context" + "time" + + "go.opencensus.io/trace" +) + +type loaderConfig struct { + FetchFunc func(context.Context, []string) ([]interface{}, error) // FetchFunc should return resources for the provided IDs (order doesn't matter). + IDFunc func(interface{}) string // Should return the unique ID for a given resource. + + Delay time.Duration // Delay before fetching pending requests. + Max int // Max number of pending requests before immediate fetch (also max number of requests per-db-call). + + Name string // Name to use in traces. 
+} + +type loaderReq struct { + id string + ch chan *loaderEntry +} + +type loaderEntry struct { + id string + done chan struct{} + err error + data interface{} +} +type loader struct { + ctx context.Context + + cfg loaderConfig + cache map[string]*loaderEntry + err error + doneCh chan struct{} + + reqCh chan loaderReq +} + +func newLoader(ctx context.Context, cfg loaderConfig) *loader { + l := &loader{ + ctx: ctx, + cfg: cfg, + + cache: make(map[string]*loaderEntry, cfg.Max), + reqCh: make(chan loaderReq, cfg.Max), + doneCh: make(chan struct{}), + } + go l.loop() + return l +} + +// load will perform a batch load for a list of entries +func (l *loader) load(entries []*loaderEntry) []*loaderEntry { + + // If we need to load more than the max, call load with the max, and return the rest. + if len(entries) > l.cfg.Max { + l.load(entries[:l.cfg.Max]) + return entries[l.cfg.Max:] + } + + // We need to copy the list so we don't get overwritten if other + // batch updates are done in the background while this call is processing. + cpy := make([]*loaderEntry, len(entries)) + copy(cpy, entries) + + go func() { + ctx, sp := trace.StartSpan(l.ctx, "Loader.Fetch/"+l.cfg.Name) + defer sp.End() + + // Map the entries out by ID, and collect the list of IDs + // for the DB call. + m := make(map[string]*loaderEntry, len(entries)) + ids := make([]string, len(entries)) + for i, e := range cpy { + ids[i] = e.id + m[e.id] = e + } + + // Call fetch for everything we're loading + res, err := l.cfg.FetchFunc(ctx, ids) + if err != nil { + // If the fetch failed, set all the pending entires err property to + // reflect the failure, and close the done channel to indicate the load/fetch + // completed. + for _, e := range cpy { + e.err = err + close(e.done) + } + return + } + + for i := range res { + // Go through each received response and update the data value based on the ID. + // We're processing against a map so we can ignore order within fetch methods. 
+ id := l.cfg.IDFunc(res[i]) + e := m[id] + if e == nil { + // Ignore any unknown/unexpected results + continue + } + e.data = res[i] + } + + // nil or not, all entires are now done loading, if the .data prop was not set + // then the entry does not exist. + for _, e := range cpy { + close(e.done) + } + }() + + return entries[:0] +} + +// entry will return the current entry or create a new one in the map. +// It passes the new or existing loaderEntry to the requester. +func (l *loader) entry(req loaderReq) (*loaderEntry, bool) { + if v, ok := l.cache[req.id]; ok { + req.ch <- v + return v, false + } + + e := &loaderEntry{ + id: req.id, + done: make(chan struct{}), + } + l.cache[req.id] = e + req.ch <- e + return e, true +} + +func (l *loader) loop() { + + // timerStart tracks if the delay timer has started or not. + var timerStart bool + var t *time.Timer + // waitCh by default will block indefinitely, since the timer + // shouldn't start until the first pending request is made. + waitCh := (<-chan time.Time)(make(chan time.Time)) + batch := make([]*loaderEntry, 0, l.cfg.Max) + + var req loaderReq + for { + select { + case <-waitCh: + // timer expired load immediately + batch = l.load(batch) + timerStart = false + case <-l.ctx.Done(): + // context expired, return err for all new requests + l.err = l.ctx.Err() + close(l.doneCh) + for _, b := range batch { + // return err for all pending requests + b.err = l.err + close(b.done) + } + return + case req = <-l.reqCh: + e, isNew := l.entry(req) + if !isNew { + // request for that ID is already pending, nothing to do + continue + } + + // new entries get added to the batch + batch = append(batch, e) + } + + // If we ever exceed max, immediately load, batch then becomes + // whatever is left. 
+ if len(batch) > l.cfg.Max { + batch = l.load(batch) + } + + if !timerStart && len(batch) > 0 { + // If the timer hasn't started, but we have something waiting, start it + timerStart = true + t = time.NewTimer(l.cfg.Delay) + waitCh = t.C + } else if timerStart && len(batch) == 0 { + // If the timer is running, but there are no more pending entries, + // stop it. + t.Stop() + waitCh = make(chan time.Time) + timerStart = false + } + } +} + +func (l *loader) FetchOne(ctx context.Context, id string) (interface{}, error) { + req := loaderReq{ + id: id, + // We use a buffered channel so we don't have anything block if we jump out + // of this method (e.g. for context deadline). + ch: make(chan *loaderEntry, 1), + } + + // Wait for context, loader shutdown, or acceptance of our request. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case l.reqCh <- req: + case <-l.doneCh: + return nil, l.err + } + + // Wait for context, or the loaderEntry associated with our request. + var resp *loaderEntry + select { + case <-ctx.Done(): + return nil, ctx.Err() + case resp = <-req.ch: + } + + // Wait for context, or confirmation that our entry has finished loading. 
+ select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-resp.done: + } + + return resp.data, resp.err +} diff --git a/dataloader/loader_test.go b/dataloader/loader_test.go new file mode 100644 index 0000000000..078470d08d --- /dev/null +++ b/dataloader/loader_test.go @@ -0,0 +1,53 @@ +package dataloader + +import ( + "context" + "testing" + "time" +) + +func TestLoader_FetchOne(t *testing.T) { + type example struct{ id string } + cfg := loaderConfig{ + Max: 10, + Delay: time.Millisecond, + IDFunc: func(v interface{}) string { return v.(*example).id }, + FetchFunc: func(context.Context, []string) ([]interface{}, error) { return []interface{}{&example{id: "foo"}}, nil }, + } + l := newLoader(context.Background(), cfg) + + res, err := l.FetchOne(context.Background(), "foo") + if err != nil { + t.Fatal(err) + } + + if r, ok := res.(*example); ok { + if r.id != "foo" { + t.Errorf("got id=%s; want foo", r.id) + } + } else { + t.Errorf("got %T; want *example", res) + } +} + +func TestLoader_FetchOne_Missing(t *testing.T) { + type example struct{ id string } + cfg := loaderConfig{ + Max: 10, + Delay: time.Millisecond, + IDFunc: func(v interface{}) string { return v.(*example).id }, + FetchFunc: func(context.Context, []string) ([]interface{}, error) { return []interface{}{&example{id: "bar"}}, nil }, + } + ctx, done := context.WithTimeout(context.Background(), time.Second) + defer done() + l := newLoader(ctx, cfg) + + res, err := l.FetchOne(ctx, "foo") + if err != nil { + t.Fatal(err) + } + + if res != nil { + t.Errorf("got %T; want nil", res) + } +} diff --git a/dataloader/policyloader.go b/dataloader/policyloader.go new file mode 100644 index 0000000000..0f098d26d6 --- /dev/null +++ b/dataloader/policyloader.go @@ -0,0 +1,49 @@ +package dataloader + +import ( + "context" + "github.com/target/goalert/escalation" + "time" +) + +type PolicyLoader struct { + *loader + store escalation.Store +} + +func NewPolicyLoader(ctx context.Context, store escalation.Store) 
*PolicyLoader { + p := &PolicyLoader{ + store: store, + } + p.loader = newLoader(ctx, loaderConfig{ + Max: 100, + Delay: time.Millisecond, + IDFunc: func(pol interface{}) string { return pol.(*escalation.Policy).ID }, + FetchFunc: p.fetch, + }) + return p +} + +func (p *PolicyLoader) FetchOne(ctx context.Context, id string) (*escalation.Policy, error) { + pol, err := p.loader.FetchOne(ctx, id) + if err != nil { + return nil, err + } + if pol == nil { + return nil, err + } + return pol.(*escalation.Policy), nil +} + +func (p *PolicyLoader) fetch(ctx context.Context, ids []string) ([]interface{}, error) { + pol, err := p.store.FindManyPolicies(ctx, ids) + if err != nil { + return nil, err + } + + res := make([]interface{}, len(pol)) + for i := range pol { + res[i] = &pol[i] + } + return res, nil +} diff --git a/dataloader/rotationloader.go b/dataloader/rotationloader.go new file mode 100644 index 0000000000..004f0413aa --- /dev/null +++ b/dataloader/rotationloader.go @@ -0,0 +1,49 @@ +package dataloader + +import ( + "context" + "github.com/target/goalert/schedule/rotation" + "time" +) + +type RotationLoader struct { + *loader + store rotation.Store +} + +func NewRotationLoader(ctx context.Context, store rotation.Store) *RotationLoader { + p := &RotationLoader{ + store: store, + } + p.loader = newLoader(ctx, loaderConfig{ + Max: 100, + Delay: time.Millisecond, + IDFunc: func(v interface{}) string { return v.(*rotation.Rotation).ID }, + FetchFunc: p.fetch, + }) + return p +} + +func (l *RotationLoader) FetchOne(ctx context.Context, id string) (*rotation.Rotation, error) { + v, err := l.loader.FetchOne(ctx, id) + if err != nil { + return nil, err + } + if v == nil { + return nil, err + } + return v.(*rotation.Rotation), nil +} + +func (l *RotationLoader) fetch(ctx context.Context, ids []string) ([]interface{}, error) { + many, err := l.store.FindMany(ctx, ids) + if err != nil { + return nil, err + } + + res := make([]interface{}, len(many)) + for i := range many { + 
res[i] = &many[i] + } + return res, nil +} diff --git a/dataloader/scheduleloader.go b/dataloader/scheduleloader.go new file mode 100644 index 0000000000..41a519bf84 --- /dev/null +++ b/dataloader/scheduleloader.go @@ -0,0 +1,49 @@ +package dataloader + +import ( + "context" + "github.com/target/goalert/schedule" + "time" +) + +type ScheduleLoader struct { + *loader + store schedule.Store +} + +func NewScheduleLoader(ctx context.Context, store schedule.Store) *ScheduleLoader { + p := &ScheduleLoader{ + store: store, + } + p.loader = newLoader(ctx, loaderConfig{ + Max: 100, + Delay: time.Millisecond, + IDFunc: func(v interface{}) string { return v.(*schedule.Schedule).ID }, + FetchFunc: p.fetch, + }) + return p +} + +func (l *ScheduleLoader) FetchOne(ctx context.Context, id string) (*schedule.Schedule, error) { + v, err := l.loader.FetchOne(ctx, id) + if err != nil { + return nil, err + } + if v == nil { + return nil, err + } + return v.(*schedule.Schedule), nil +} + +func (l *ScheduleLoader) fetch(ctx context.Context, ids []string) ([]interface{}, error) { + many, err := l.store.FindMany(ctx, ids) + if err != nil { + return nil, err + } + + res := make([]interface{}, len(many)) + for i := range many { + res[i] = &many[i] + } + return res, nil +} diff --git a/dataloader/serviceloader.go b/dataloader/serviceloader.go new file mode 100644 index 0000000000..dbcd0aa603 --- /dev/null +++ b/dataloader/serviceloader.go @@ -0,0 +1,49 @@ +package dataloader + +import ( + "context" + "github.com/target/goalert/service" + "time" +) + +type ServiceLoader struct { + *loader + store service.Store +} + +func NewServiceLoader(ctx context.Context, store service.Store) *ServiceLoader { + p := &ServiceLoader{ + store: store, + } + p.loader = newLoader(ctx, loaderConfig{ + Max: 100, + Delay: time.Millisecond, + IDFunc: func(v interface{}) string { return v.(*service.Service).ID }, + FetchFunc: p.fetch, + }) + return p +} + +func (l *ServiceLoader) FetchOne(ctx context.Context, id 
string) (*service.Service, error) { + svc, err := l.loader.FetchOne(ctx, id) + if err != nil { + return nil, err + } + if svc == nil { + return nil, err + } + return svc.(*service.Service), nil +} + +func (l *ServiceLoader) fetch(ctx context.Context, ids []string) ([]interface{}, error) { + many, err := l.store.FindMany(ctx, ids) + if err != nil { + return nil, err + } + + res := make([]interface{}, len(many)) + for i := range many { + res[i] = &many[i] + } + return res, nil +} diff --git a/dataloader/userloader.go b/dataloader/userloader.go new file mode 100644 index 0000000000..271de7f8a5 --- /dev/null +++ b/dataloader/userloader.go @@ -0,0 +1,49 @@ +package dataloader + +import ( + "context" + "github.com/target/goalert/user" + "time" +) + +type UserLoader struct { + *loader + store user.Store +} + +func NewUserLoader(ctx context.Context, store user.Store) *UserLoader { + p := &UserLoader{ + store: store, + } + p.loader = newLoader(ctx, loaderConfig{ + Max: 100, + Delay: time.Millisecond, + IDFunc: func(v interface{}) string { return v.(*user.User).ID }, + FetchFunc: p.fetch, + }) + return p +} + +func (l *UserLoader) FetchOne(ctx context.Context, id string) (*user.User, error) { + v, err := l.loader.FetchOne(ctx, id) + if err != nil { + return nil, err + } + if v == nil { + return nil, err + } + return v.(*user.User), nil +} + +func (l *UserLoader) fetch(ctx context.Context, ids []string) ([]interface{}, error) { + many, err := l.store.FindMany(ctx, ids) + if err != nil { + return nil, err + } + + res := make([]interface{}, len(many)) + for i := range many { + res[i] = &many[i] + } + return res, nil +} diff --git a/devtools/ci/dockerfiles/all-in-one/Dockerfile b/devtools/ci/dockerfiles/all-in-one/Dockerfile new file mode 100644 index 0000000000..ec6d807bc6 --- /dev/null +++ b/devtools/ci/dockerfiles/all-in-one/Dockerfile @@ -0,0 +1,3 @@ +FROM postgres:11-alpine +COPY goalert waitfor init.sql start.sh /bin/ +CMD ["/bin/start.sh"] diff --git 
a/devtools/ci/dockerfiles/all-in-one/README.md b/devtools/ci/dockerfiles/all-in-one/README.md new file mode 100644 index 0000000000..94a7c64a12 --- /dev/null +++ b/devtools/ci/dockerfiles/all-in-one/README.md @@ -0,0 +1,10 @@ +# GoAlert All-In-One Container + +This directory contains the `Dockerfile` for building GoAlert's all-in-one (demo) Docker container. +This container provides a simple way to start and explore GoAlert. It is not recommended for production use. + +### Assumptions + +`goalert` binary built with `GOOS=linux BUNDLE=1` located in this directory before docker build. +`waitfor` binary built with `GOOS=linux` located in this directory before docker build. +`init.sql` PostgreSQL demo data located in this directory before docker build. diff --git a/devtools/ci/dockerfiles/all-in-one/start.sh b/devtools/ci/dockerfiles/all-in-one/start.sh new file mode 100755 index 0000000000..80312c337f --- /dev/null +++ b/devtools/ci/dockerfiles/all-in-one/start.sh @@ -0,0 +1,17 @@ +#!/bin/sh +set -e + +./usr/local/bin/docker-entrypoint.sh postgres &> /var/log/postgres.log & + +export DB_URL=postgres://postgres@127.0.0.1/postgres?sslmode=disable + +./bin/waitfor "$DB_URL" + +if test -f /bin/init.sql +then + echo Seeding DB with demo data... 
+ psql -d "$DB_URL" < /bin/init.sql > /dev/null + mv /bin/init.sql /bin/init.sql.applied +fi + +exec /bin/goalert --db-url "$DB_URL" --log-requests -l ":8081" diff --git a/devtools/ci/dockerfiles/build-env/Dockerfile b/devtools/ci/dockerfiles/build-env/Dockerfile new file mode 100644 index 0000000000..dcb7450ebf --- /dev/null +++ b/devtools/ci/dockerfiles/build-env/Dockerfile @@ -0,0 +1,7 @@ +FROM postgres:11 + +RUN apt-get update && apt-get install -y build-essential nginx curl xvfb libgtk2.0-0 libnotify-dev libgconf-2-4 libnss3 libxss1 libasound2 libpng-dev git && rm -rf /var/lib/apt/lists/* +RUN curl -s https://nodejs.org/dist/v10.15.3/node-v10.15.3-linux-x64.tar.xz | tar xJ --strip-components 1 -C / +RUN curl -s https://dl.google.com/go/go1.12.4.linux-amd64.tar.gz | tar xz -C /usr/local +ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/node_modules/.bin:/usr/lib/postgresql/9.6/bin:/usr/local/go/bin:/root/go/bin +RUN npm install -g yarn diff --git a/devtools/ci/tasks/build-debug.yml b/devtools/ci/tasks/build-debug.yml new file mode 100644 index 0000000000..3232ddd9dc --- /dev/null +++ b/devtools/ci/tasks/build-debug.yml @@ -0,0 +1,24 @@ +platform: linux +inputs: +- name: goalert +- name: db-dumps +- name: videos +- name: screenshots +outputs: +- name: build-debug +image_resource: + type: registry-image + source: + repository: alpine +run: + path: sh + args: + - -c + - | + mkdir skip_vids + mv videos/* skip_vids/ + for dir in screenshots/* + do + mv skip_vids/$(basename "$dir").mp4 videos/ + done + tar czf build-debug/build-debug-$(cat goalert/.git/short_ref || date +%F_%H-%M-%S).tgz db-dumps videos screenshots diff --git a/devtools/ci/tasks/build-test.yml b/devtools/ci/tasks/build-test.yml new file mode 100644 index 0000000000..32e4522f3d --- /dev/null +++ b/devtools/ci/tasks/build-test.yml @@ -0,0 +1,27 @@ +platform: linux +caches: + - path: /go/pkg + - path: /root/go/pkg + - path: goalert/web/src/node_modules + - path: 
/usr/local/share/.cache/yarn/v4 + - path: /root/.cache +inputs: + - name: goalert +outputs: + - name: bin + path: goalert/bin + - name: db-dumps + path: goalert/smoketest/smoketest_db_dump + - name: videos + path: goalert/web/src/cypress/videos + - name: screenshots + path: goalert/web/src/cypress/screenshots +image_resource: + type: registry-image + source: + repository: gcr.io/tgt-goalert/goalert-build-env + username: _json_key + password: ((storage-account)) +run: + dir: goalert + path: ./devtools/ci/tasks/scripts/build-test.sh diff --git a/devtools/ci/tasks/scripts/build-test.sh b/devtools/ci/tasks/scripts/build-test.sh new file mode 100755 index 0000000000..bbf24ef5f4 --- /dev/null +++ b/devtools/ci/tasks/scripts/build-test.sh @@ -0,0 +1,20 @@ +#!/bin/sh +set -ex + +mkdir -p /run/postgresql +chown postgres:postgres /run/postgresql +export PGDATA=/var/lib/postgresql/data +su postgres -c /usr/lib/postgresql/11/bin/initdb +su postgres -c '/usr/lib/postgresql/11/bin/pg_ctl start' +trap 'su postgres -c "/usr/lib/postgresql/11/bin/pg_ctl -m immediate stop"' EXIT +export DB_URL=postgres://postgres@localhost:5432?sslmode=disable + +touch web/src/yarn.lock # ensure we refresh node_modules + +make check test install smoketest cypress DB_URL=$DB_URL BUNDLE=1 + +CYPRESS_viewportWidth=1440 CYPRESS_viewportHeight=900 bin/runjson -logs=./wide-logs bin/goalert-$(date +%Y%m%d%H%M%S).gz diff --git a/devtools/ci/tasks/scripts/codecheck.sh b/devtools/ci/tasks/scripts/codecheck.sh new file mode 100755 index 0000000000..deef19bd20 --- /dev/null +++ b/devtools/ci/tasks/scripts/codecheck.sh @@ -0,0 +1,21 @@ +#!/bin/sh +set -e + +NOFMT=$(gofmt -l $(find . 
-name '*.go' |grep -v /vendor)) + +if test "$NOFMT" != "" +then + echo "Found non-formatted files:" + echo "$NOFMT" + exit 1 +fi + +CHANGES=$(git status -s --porcelain) + +if test "$CHANGES" != "" +then + echo "Found changes in git:" + echo "$CHANGES" + exit 1 +fi + diff --git a/devtools/configparams/main.go b/devtools/configparams/main.go new file mode 100644 index 0000000000..00a24f21c9 --- /dev/null +++ b/devtools/configparams/main.go @@ -0,0 +1,199 @@ +package main + +import ( + "flag" + "fmt" + "os" + "reflect" + "strconv" + "strings" + "text/template" + + "github.com/target/goalert/config" +) + +func hasType(typeName string, fields []field) bool { + for _, f := range fields { + if f.Type == typeName { + return true + } + } + return false +} + +var tmpl = template.Must( + template. + New("mapconfig.go"). + Funcs(template.FuncMap{ + "quote": strconv.Quote, + "hasBool": func(fields []field) bool { return hasType("ConfigTypeBoolean", fields) }, + "hasStrList": func(fields []field) bool { return hasType("ConfigTypeStringList", fields) }, + "hasInt": func(fields []field) bool { return hasType("ConfigTypeInteger", fields) }, + }). + Parse(` +import ( + "github.com/target/goalert/config" + "github.com/target/goalert/validation" +) + +// MapConfigValues will map a Config struct into a flat list of ConfigValue structs. +func MapConfigValues(cfg config.Config) []ConfigValue { + return []ConfigValue{ + {{- range . }} + {ID: {{quote .ID}}, Type: {{.Type}}, Description: {{quote .Desc}}, Value: {{.Value}}{{if .Password}}, Password: true{{end}}}, + {{- end}} + } +} + +// MapPublicConfigValues will map a Config struct into a flat list of ConfigValue structs. +func MapPublicConfigValues(cfg config.Config) []ConfigValue { + return []ConfigValue{ + {{- range . 
}} + {{- if .Public}} + {ID: {{quote .ID}}, Type: {{.Type}}, Description: {{quote .Desc}}, Value: {{.Value}}{{if .Password}}, Password: true{{end}}}, + {{- end}} + {{- end}} + } +} + +// ApplyConfigValues will apply a list of ConfigValues to a Config struct. +func ApplyConfigValues(cfg config.Config, vals []ConfigValueInput) (config.Config, error) { + {{- if hasStrList .}} + parseStringList := func(v string) []string { + if v == "" { + return nil + } + return strings.Split(v, "\n") + } + {{- end}} + {{- if hasInt .}} + parseInt := func(id, v string) (int, error) { + if v == "" { + return 0, nil + } + val, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, validation.NewFieldError("\""+id+"\".Value", "integer value invalid: " + err.Error()) + } + return int(val), nil + } + {{- end}} + {{- if hasBool .}} + parseBool := func(id, v string) (bool, error) { + switch v { + case "true": + return true, nil + case "false": + return false, nil + default: + return false, validation.NewFieldError("\""+id+"\".Value", "boolean value invalid: expected 'true' or 'false'") + } + } + {{- end}} + for _, v := range vals { + switch v.ID { + {{- range .}} + case {{quote .ID}}: + {{- if eq .Type "ConfigTypeString"}} + cfg.{{.ID}} = v.Value + {{- else if eq .Type "ConfigTypeStringList"}} + cfg.{{.ID}} = parseStringList(v.Value) + {{- else if eq .Type "ConfigTypeInteger"}} + val, err := parseInt(v.ID, v.Value) + if err != nil { + return cfg, err + } + cfg.{{.ID}} = val + {{- else if eq .Type "ConfigTypeBoolean"}} + val, err := parseBool(v.ID, v.Value) + if err != nil { + return cfg, err + } + cfg.{{.ID}} = val + {{- end}} + {{- end}} + default: + return cfg, validation.NewFieldError("ID", fmt.Sprintf("unknown config ID '%s'", v.ID)) + } + } + return cfg, nil +} +`)) + +type field struct { + ID, Type, Desc, Value string + Public, Password bool +} + +func main() { + out := flag.String("out", "", "Output file.") + flag.Parse() + + w := os.Stdout + if *out != "" { + fd, err := 
os.Create(*out) + if err != nil { + panic(err) + } + defer fd.Close() + w = fd + } + + fmt.Fprintln(w, `// Code generated by devtools/configparams DO NOT EDIT. + +package graphql2`) + + fields := printType("", reflect.TypeOf(config.Config{}), "", false, false) + + err := tmpl.Execute(w, fields) + if err != nil { + panic(err) + } +} + +func printField(prefix string, f reflect.StructField) []field { + fPrefix := prefix + f.Name + "." + if f.Type.Kind() == reflect.Slice && f.Type.Elem().Kind() == reflect.Struct { + fPrefix = prefix + f.Name + "[]." + } + return printType(fPrefix, f.Type, f.Tag.Get("info"), f.Tag.Get("public") == "true", f.Tag.Get("password") == "true") +} +func printType(prefix string, v reflect.Type, details string, public, pass bool) []field { + var f []field + key := strings.TrimSuffix(prefix, ".") + + var typ, value string + switch v.Kind() { + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if v.Field(i).PkgPath != "" { + // skip unexported fields + continue + } + f = append(f, printField(prefix, v.Field(i))...) 
+ } + return f + case reflect.Bool: + typ = "ConfigTypeBoolean" + value = fmt.Sprintf(`fmt.Sprintf("%%t", cfg.%s)`, key) + case reflect.String: + typ = "ConfigTypeString" + value = fmt.Sprintf(`cfg.%s`, key) + case reflect.Int: + typ = "ConfigTypeInteger" + value = fmt.Sprintf(`fmt.Sprintf("%%d", cfg.%s)`, key) + case reflect.Slice: + switch v.Elem().Kind() { + case reflect.String: + typ = "ConfigTypeStringList" + value = fmt.Sprintf(`strings.Join(cfg.%s, "\n")`, key) + default: + panic(fmt.Sprintf("not implemented for type []%v", v.Elem().Kind())) + } + default: + panic(fmt.Sprintf("not implemented for type %T", v.Kind())) + } + + f = append(f, field{ID: key, Type: typ, Desc: details, Value: value, Public: public, Password: pass}) + return f +} diff --git a/devtools/devtools.go b/devtools/devtools.go new file mode 100644 index 0000000000..735aab3a5f --- /dev/null +++ b/devtools/devtools.go @@ -0,0 +1,4 @@ +// Package devtools is just a placeholder for tools.go. +// +// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module +package devtools diff --git a/devtools/fetchcounts/main.go b/devtools/fetchcounts/main.go new file mode 100644 index 0000000000..d9ce021807 --- /dev/null +++ b/devtools/fetchcounts/main.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + "database/sql" + "flag" + "fmt" + "github.com/target/goalert/util/log" + "os" + + _ "github.com/lib/pq" +) + +var queries = [][]string{ + {"UserCount", `select count(id) from users`}, + {"CMMax", `select count(id) from user_contact_methods group by user_id order by count desc limit 1`}, + {"NRMax", `select count(id) from user_notification_rules group by user_id order by count desc limit 1`}, + {"NRCMMax", `select count(id) from user_notification_rules group by user_id,contact_method_id order by count desc limit 1`}, + {"EPCount", `select count(id) from escalation_policies`}, + {"EPMaxStep", `select count(id) from escalation_policy_steps group by 
escalation_policy_id order by count desc limit 1`}, + {"EPMaxAssigned", `select count(id) from escalation_policy_actions group by escalation_policy_step_id order by count desc limit 1`}, + {"SvcCount", `select count(id) from services`}, + {"RotationMaxPart", `select count(id) from rotation_participants group by rotation_id order by count desc limit 1`}, + {"ScheduleCount", `select count(id) from schedules`}, + {"AlertClosedCount", `select count(id) from alerts where status = 'closed'`}, + {"AlertActiveCount", `select count(id) from alerts where status = 'triggered' or status = 'active'`}, + {"RotationCount", `select count(id) from rotations`}, + {"IntegrationKeyMax", `select count(id) from integration_keys group by service_id order by count desc limit 1`}, + {"ScheduleMaxRules", `select count(id) from schedule_rules group by schedule_id order by count desc limit 1`}, +} + +func noErr(err error) { + if err == nil { + return + } + log.Log(context.TODO(), err) + os.Exit(1) +} + +func main() { + mult := flag.Float64("m", 1.5, "Multiplier for prod values.") + url := flag.String("db", os.Getenv("DB_URL"), "DB connection URL.") + flag.Parse() + db, err := sql.Open("postgres", *url) + noErr(err) + + for _, q := range queries { + var n int + row := db.QueryRow(q[1]) + noErr(row.Scan(&n)) + n = int(float64(n)**mult) + 1 + fmt.Printf("\t%s = %d // %s\n", q[0], n, q[1]) + } + db.Close() +} diff --git a/devtools/gqlgen/gqlgen.go b/devtools/gqlgen/gqlgen.go new file mode 100644 index 0000000000..2b8baebc32 --- /dev/null +++ b/devtools/gqlgen/gqlgen.go @@ -0,0 +1,7 @@ +package main + +import "github.com/99designs/gqlgen/cmd" + +func main() { + cmd.Execute() +} diff --git a/devtools/inliner/linebreaker.go b/devtools/inliner/linebreaker.go new file mode 100644 index 0000000000..1441df28c1 --- /dev/null +++ b/devtools/inliner/linebreaker.go @@ -0,0 +1,48 @@ +package main + +import ( + "io" +) + +const lineLength = 80 + +type lineBreaker struct { + pos int + + out io.Writer +} + 
+var nl = []byte{'\n'} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + if len(b) == 0 { + return 0, nil + } + if l.pos+len(b) < lineLength { + l.pos += len(b) + return l.out.Write(b) + } + + diff := lineLength - l.pos + + n, err = l.out.Write(b[:diff]) + if err != nil { + return n, err + } + l.pos = 0 + + n, err = l.out.Write(nl) + if err != nil { + return diff + n, err + } + + n, err = l.Write(b[diff:]) + return lineLength + 1 + n, err +} + +func (l *lineBreaker) Close() (err error) { + if l.pos > 0 { + _, err = l.out.Write(nl) + } + return err +} diff --git a/devtools/inliner/main.go b/devtools/inliner/main.go new file mode 100644 index 0000000000..2a7eca37a0 --- /dev/null +++ b/devtools/inliner/main.go @@ -0,0 +1,175 @@ +package main + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "flag" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "text/template" + + "golang.org/x/tools/imports" +) + +const typesTmplStr = `// Code generated by inliner DO NOT EDIT. + +package {{.PackageName}} + +type File struct { + Name string + Data func() []byte +} + +var Files []File +` + +const dataTmplStr = `// Code generated by inliner DO NOT EDIT. 
+ +package {{.PackageName}} + +{{if .Files}} +func init() { + var parseOnce sync.Once + var data []byte + + dataStr := {{.Encoded}} + dataRange := func(start, end int) (func() []byte) { + return func() []byte { + parseOnce.Do(func(){ + dec := base64.NewDecoder( + base64.URLEncoding, + bytes.NewBufferString(strings.Replace(dataStr, "\n", "", -1)), + ) + r, err := gzip.NewReader(dec) + if err != nil { + panic(err) + } + defer r.Close() + + buf := new(bytes.Buffer) + buf.Grow({{.Data.Len}}) + + _, err = io.Copy(buf, r) + if err != nil { + panic(err) + } + + data = buf.Bytes() + }) + + return data[start:end] + } + } + + + Files = []File{ + {{- range .Files}} + {Data: dataRange({{.DataStart}},{{.DataEnd}}), Name: {{printf "%q" .Path}}}, + {{- end}} + } +} +{{else}} +// No files +{{end}} +` + +var ( + typesTmpl = template.Must(template.New("types").Parse(typesTmplStr)) + dataTmpl = template.Must(template.New("data").Parse(dataTmplStr)) +) + +type file struct { + Path string + DataStart int + DataLen int +} + +func (f file) DataEnd() int { + return f.DataStart + f.DataLen +} + +func (dm *dataMap) Encoded() string { + return "`\n" + dm.Data.String() + "`" +} + +type dataMap struct { + PackageName string + Files []*file + Data bytes.Buffer +} + +func main() { + packageName := flag.String("pkg", "", "Set the package name of the output file.") + flag.Parse() + + if *packageName == "" { + log.Fatal("pkg is required") + } + + var m dataMap + m.PackageName = *packageName + + for _, pattern := range flag.Args() { + matches, err := filepath.Glob(pattern) + if err != nil { + log.Fatalf("glob pattern '%s': %v", pattern, err) + } + for _, match := range matches { + f, err := os.Stat(match) + if err != nil { + log.Fatalf("stat file '%s': %v", match, err) + } + if f.IsDir() { + continue + } + m.Files = append(m.Files, &file{ + Path: match, + }) + } + } + sort.Slice(m.Files, func(i, j int) bool { return m.Files[i].Path < m.Files[j].Path }) + + var n int + lb := &lineBreaker{out: 
&m.Data} + enc := base64.NewEncoder(base64.URLEncoding, lb) + w := gzip.NewWriter(enc) + for _, file := range m.Files { + data, err := ioutil.ReadFile(file.Path) + if err != nil { + log.Fatalf("read file '%s': %v", file.Path, err) + } + file.DataStart = n + file.DataLen = len(data) + n += file.DataLen + w.Write(data) + } + w.Close() + enc.Close() + lb.Close() + + gen := func(filename string, tmpl *template.Template) { + + buf := bytes.NewBuffer(nil) + err := tmpl.Execute(buf, &m) + if err != nil { + log.Fatal("render:", err) + } + + data, err := imports.Process(filename, buf.Bytes(), nil) + if err != nil { + log.Fatal("format:", err) + } + + err = ioutil.WriteFile(filename, data, 0644) + if err != nil { + log.Fatal("save:", err) + } + } + + gen("inline_types_gen.go", typesTmpl) + gen("inline_data_gen.go", dataTmpl) +} diff --git a/devtools/mockslack/api.go b/devtools/mockslack/api.go new file mode 100644 index 0000000000..b47d763263 --- /dev/null +++ b/devtools/mockslack/api.go @@ -0,0 +1,26 @@ +package mockslack + +// API allows making calls to implemented Slack API methods. +// +// API methods implement permission/scope checking. +type API state + +// API returns an API instance. +func (st *state) API() *API { return (*API)(st) } + +// Channel represents a Slack channel or group. +type Channel struct { + ID string `json:"id"` + Name string `json:"name"` + + IsChannel bool `json:"is_channel"` + IsGroup bool `json:"is_group"` + IsArchived bool `json:"is_archived"` +} + +// Message represents a Slack message. +type Message struct { + TS string `json:"ts"` + Text string `json:"text"` + User string `json:"user"` +} diff --git a/devtools/mockslack/authrevoke.go b/devtools/mockslack/authrevoke.go new file mode 100644 index 0000000000..5dc2848cab --- /dev/null +++ b/devtools/mockslack/authrevoke.go @@ -0,0 +1,43 @@ +package mockslack + +import ( + "context" + "net/http" +) + +// AuthRevoke will revoke the auth token from the provided context. 
+func (st *API) AuthRevoke(ctx context.Context, test bool) (bool, error) { + st.mx.Lock() + defer st.mx.Unlock() + id := tokenID(ctx) + tok := st.tokens[id] + if tok == nil { + return false, &response{Err: "invalid_auth"} + } + + if !test { + delete(st.tokens, id) + return true, nil + } + + return false, nil +} + +// ServeAuthRevoke implements the auth.revoke API call. +// +// https://api.slack.com/methods/auth.revoke +func (s *Server) ServeAuthRevoke(w http.ResponseWriter, req *http.Request) { + revoked, err := s.API().AuthRevoke(req.Context(), req.FormValue("test") != "") + if respondErr(w, err) { + return + } + + var resp struct { + response + Revoked bool `json:"revoked"` + } + resp.OK = true + resp.Revoked = revoked + + respondWith(w, resp) +} diff --git a/devtools/mockslack/channelscreate.go b/devtools/mockslack/channelscreate.go new file mode 100644 index 0000000000..be693665a2 --- /dev/null +++ b/devtools/mockslack/channelscreate.go @@ -0,0 +1,105 @@ +package mockslack + +import ( + "context" + "net/http" + "strings" +) + +// ConversationCreateOpts is used to configure a new +// channel or group. +type ConversationCreateOpts struct { + Name string + Validate bool +} + +// cleanChannelName will replace invalid characters with `_` and +// truncate the name if it is longer than 21 characters. 
+func cleanChannelName(name string) string { + name = strings.Map(func(r rune) rune { + if r >= 'a' && r <= 'z' { + return r + } + if r >= 'A' && r <= 'Z' { + // make lower-case + return r + ('a' - 'A') + } + if r >= '0' && r <= '9' { + return r + } + if r == '-' || r == '_' { + return r + } + return '_' + }, name) + + if len(name) > 21 { + name = name[:21] + } + + return name +} + +func validateChannelName(name string) error { + if name == "" { + return &response{Err: "invalid_name_required"} + } + if len(name) > 21 { + return &response{Err: "invalid_name_maxlength"} + } + if name != cleanChannelName(name) { + return &response{Err: "invalid_name"} + } + if !strings.ContainsAny(name, "abcdefghijklmnopqrstuvwxyz0123456789") { + return &response{Err: "invalid_name_punctuation"} + } + + return nil +} + +// ChannelsCreate is used to create a channel. +func (st *API) ChannelsCreate(ctx context.Context, opts ConversationCreateOpts) (*Channel, error) { + err := checkPermission(ctx, "channels:write") + if err != nil { + return nil, err + } + + if !opts.Validate { + opts.Name = cleanChannelName(opts.Name) + } + err = validateChannelName(opts.Name) + if err != nil { + return nil, err + } + + ch := Channel{ + ID: st.gen.ChannelID(), + Name: opts.Name, + IsChannel: true, + } + + st.mx.Lock() + st.channels[ch.ID] = &channelState{Channel: ch} + st.mx.Unlock() + + return &ch, nil +} + +// ServeChannelsCreate serves a request to the `channels.create` API call. 
+// +// https://api.slack.com/methods/channels.create +func (s *Server) ServeChannelsCreate(w http.ResponseWriter, req *http.Request) { + ch, err := s.API().ChannelsCreate(req.Context(), ConversationCreateOpts{Name: req.FormValue("name"), Validate: req.FormValue("validate") == "true"}) + if respondErr(w, err) { + return + } + + var resp struct { + response + Channel *Channel `json:"channel"` + } + resp.OK = true + resp.Channel = ch + + respondWith(w, resp) +} diff --git a/devtools/mockslack/chatpostmessage.go b/devtools/mockslack/chatpostmessage.go new file mode 100644 index 0000000000..32aa4d3e9c --- /dev/null +++ b/devtools/mockslack/chatpostmessage.go @@ -0,0 +1,115 @@ +package mockslack + +import ( + "context" + "net/http" + "strconv" + "time" +) + +// ChatPostMessageOptions are parameters for a `chat.postMessage` call. +type ChatPostMessageOptions struct { + ChannelID string + Text string + + AsUser bool + + ThreadTS string +} + +func (ch *channelState) nextTS() string { + t := time.Now() + if !t.After(ch.TS) { + t = ch.TS.Add(1) + } + ch.TS = t + + return strconv.FormatFloat(time.Duration(t.UnixNano()).Seconds(), 'f', -1, 64) +} + +// ChatPostMessage posts a message to a channel. 
+func (st *API) ChatPostMessage(ctx context.Context, opts ChatPostMessageOptions) (*Message, error) { + var err error + var user string + if opts.AsUser { + err = checkPermission(ctx, "chat:write:user") + user = userID(ctx) + } else { + err = checkPermission(ctx, "bot", "chat:write:bot") + user = botID(ctx) + } + if err != nil { + return nil, err + } + + if len(opts.Text) > 40000 { + return nil, &response{Err: "msg_too_long"} + } + + st.mx.Lock() + defer st.mx.Unlock() + + ch := st.channels[opts.ChannelID] + if ch == nil { + if !st.flags.autoCreateChannel { + return nil, &response{Err: "channel_not_found"} + } + + // auto create channel + ch = &channelState{Channel: Channel{ + ID: opts.ChannelID, + Name: cleanChannelName(opts.ChannelID), + IsChannel: true, + }} + if opts.AsUser { + // add the user if needed + ch.Users = append(ch.Users, userID(ctx)) + } + + st.channels[opts.ChannelID] = ch + } + + if opts.AsUser && !contains(ch.Users, userID(ctx)) { + return nil, &response{Err: "not_in_channel"} + } + + if ch.IsArchived { + return nil, &response{Err: "is_archived"} + } + + msg := &Message{ + TS: ch.nextTS(), + Text: opts.Text, + User: user, + } + ch.Messages = append(ch.Messages, msg) + + return msg, nil +} + +// ServeChatPostMessage serves a request to the `chat.postMessage` API call. 
+// +// https://api.slack.com/methods/chat.postMessage +func (s *Server) ServeChatPostMessage(w http.ResponseWriter, req *http.Request) { + chanID := req.FormValue("channel") + msg, err := s.API().ChatPostMessage(req.Context(), ChatPostMessageOptions{ + ChannelID: chanID, + Text: req.FormValue("text"), + AsUser: req.FormValue("as_user") == "true", + ThreadTS: req.FormValue("thread_ts"), + }) + if respondErr(w, err) { + return + } + + var respData struct { + response + Channel string `json:"channel"` + Message *Message `json:"message"` + } + respData.OK = true + respData.Channel = chanID + respData.Message = msg + + respondWith(w, respData) +} diff --git a/devtools/mockslack/cmd/mockslack/main.go b/devtools/mockslack/cmd/mockslack/main.go new file mode 100644 index 0000000000..4739e67f1d --- /dev/null +++ b/devtools/mockslack/cmd/mockslack/main.go @@ -0,0 +1,76 @@ +package main + +import ( + "flag" + "log" + "net/http" + "strings" + + "github.com/target/goalert/devtools/mockslack" +) + +func main() { + addr := flag.String("addr", "localhost:8085", "Address to listen on.") + prefix := flag.String("prefix", "", "API URL prefix.") + appName := flag.String("app-name", "GoAlert", "Name of the initial app.") + clientID := flag.String("client-id", "", "Default client ID.") + clientSecret := flag.String("client-secret", "", "Default client secret.") + accessToken := flag.String("access-token", "", "Default access token.") + channels := flag.String("channels", "general,test,foobar", "Comma-delimited list of initial channels.") + autoChannel := flag.Bool("auto-channel", false, "Automatically create missing channels on chat.postMessage calls.") + scopes := flag.String("scopes", "bot", "Comma-delimited list of scopes to add for the initial app.") + singleUser := flag.String("single-user", "", "If set, all requests will be implicitly authenticated.") + flag.Parse() + + log.SetFlags(log.Lshortfile) + + srv := mockslack.NewServer() + + app, err := 
srv.InstallStaticApp(mockslack.AppInfo{ + Name: *appName, + ClientID: *clientID, + ClientSecret: *clientSecret, + AccessToken: *accessToken, + }, strings.Split(*scopes, ",")...) + if err != nil { + log.Fatal(err) + } + log.Printf("AppName = %s", app.Name) + log.Printf("ClientID = %s", app.ClientID) + log.Printf("ClientSecret = %s", app.ClientSecret) + log.Printf("AccessToken = %s", app.AccessToken) + + if *channels != "" { + for _, ch := range strings.Split(*channels, ",") { + srv.NewChannel(ch) + } + } + srv.SetAutoCreateChannel(*autoChannel) + + h := http.Handler(srv) + if *prefix != "" { + h = http.StripPrefix(*prefix, h) + } + + if *singleUser != "" { + usr := srv.NewUser(*singleUser) + log.Printf("S. UserID = %s", usr.ID) + log.Printf("S. UserName = %s", usr.Name) + log.Printf("S. UserAuth = %s", usr.AuthToken) + + next := h + h = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + req.AddCookie(&http.Cookie{ + Name: "slack_token", + Value: usr.AuthToken, + }) + next.ServeHTTP(w, req) + }) + } + + log.Println("Listening:", *addr) + err = http.ListenAndServe(*addr, h) + if err != nil { + log.Fatal(err) + } +} diff --git a/devtools/mockslack/conversationsinfo.go b/devtools/mockslack/conversationsinfo.go new file mode 100644 index 0000000000..9bc8c62ee6 --- /dev/null +++ b/devtools/mockslack/conversationsinfo.go @@ -0,0 +1,61 @@ +package mockslack + +import ( + "context" + "net/http" +) + +// ConversationsInfo returns information about a conversation. 
+func (st *API) ConversationsInfo(ctx context.Context, id string) (*Channel, error) { + err := checkPermission(ctx, "bot", "channels:read", "groups:read", "im:read", "mpim:read") + if err != nil { + return nil, err + } + + st.mx.Lock() + defer st.mx.Unlock() + + ch := st.channels[id] + if ch == nil { + return nil, &response{Err: "channel_not_found"} + } + + if hasScope(ctx, "bot") { + return &ch.Channel, nil + } + + if ch.IsGroup { + err = checkPermission(ctx, "groups:read") + } else { + err = checkPermission(ctx, "channels:read") + } + if err != nil { + return nil, err + } + + if ch.IsGroup && !contains(ch.Users, userID(ctx)) { + // user is not a member of the group + return nil, &response{Err: "channel_not_found"} + } + + return &ch.Channel, nil +} + +// ServeConversationsInfo serves a request to the `conversations.info` API call. +// +// https://api.slack.com/methods/conversations.info +func (s *Server) ServeConversationsInfo(w http.ResponseWriter, req *http.Request) { + ch, err := s.API().ConversationsInfo(req.Context(), req.FormValue("channel")) + if respondErr(w, err) { + return + } + + var resp struct { + response + Channel *Channel `json:"channel"` + } + resp.OK = true + resp.Channel = ch + + respondWith(w, resp) +} diff --git a/devtools/mockslack/conversationslist.go b/devtools/mockslack/conversationslist.go new file mode 100644 index 0000000000..a663aaf578 --- /dev/null +++ b/devtools/mockslack/conversationslist.go @@ -0,0 +1,153 @@ +package mockslack + +import ( + "context" + "encoding/base64" + "math/rand" + "net/http" + "sort" + "strconv" + "strings" +) + +// ConversationsListOpts contains parameters for the ConversationsList API call. +type ConversationsListOpts struct { + Cursor string + ExcludeArchived bool + Limit int + Types string +} + +// ConversationsList returns a list of channel-like conversations in a workspace. 
+func (st *API) ConversationsList(ctx context.Context, opts ConversationsListOpts) ([]Channel, string, error) { + err := checkPermission(ctx, "bot", "channels:read", "groups:read", "im:read", "mpim:read") + if err != nil { + return nil, "", err + } + inclArchived := !opts.ExcludeArchived + inclPrivate := strings.Contains(opts.Types, "private_channel") + inclPublic := strings.Contains(opts.Types, "public_channel") || opts.Types == "" + + if inclPublic && !hasScope(ctx, "bot", "channels:read") { + return nil, "", &response{Err: "invalid_types"} + } + if inclPrivate && !hasScope(ctx, "bot", "groups:read") { + return nil, "", &response{Err: "invalid_types"} + } + + isBot := botID(ctx) != "" + uid := userID(ctx) + var cursorID string + if opts.Cursor != "" { + data, err := base64.URLEncoding.DecodeString(opts.Cursor) + if err != nil { + return nil, "", &response{Err: "invalid_cursor"} + } + cursorID = string(data) + opts.Cursor = "" + } + filter := func(ch *channelState) bool { + if ch == nil { + return false + } + if cursorID != "" && cursorID >= ch.ID { + return false + } + if ch.IsArchived && !inclArchived { + return false + } + + if ch.IsGroup && !inclPrivate { + return false + } + if !ch.IsGroup && !inclPublic { + return false + } + + if ch.IsGroup && !isBot && !contains(ch.Users, uid) { + return false + } + + return true + } + + if opts.Limit == 0 { + opts.Limit = 100 + } + if opts.Limit > 1000 { + return nil, "", &response{Err: "invalid_limit"} + } + + st.mx.Lock() + defer st.mx.Unlock() + + ids := make([]string, 0, len(st.channels)) + for id := range st.channels { + ids = append(ids, id) + } + sort.Strings(ids) + + result := make([]Channel, 0, len(ids)) + for _, id := range ids { + ch := st.channels[id] + if !filter(ch) { + continue + } + result = append(result, ch.Channel) + } + + originalTotal := len(result) + if len(result) > opts.Limit { + result = result[:opts.Limit] + } + + if len(result) > 1 { + // limit is never guaranteed (only as max) as per the docs + 
// so ensure it's handled by randomizing number of returned items + max := rand.Intn(len(result)) + 1 + result = result[:max] + } + + if originalTotal > len(result) && len(result) > 0 { + opts.Cursor = base64.URLEncoding.EncodeToString([]byte(result[len(result)-1].ID)) + } + + return result, opts.Cursor, nil +} + +// ServeConversationsList serves a request to the `conversations.list` API call. +// +// https://api.slack.com/methods/conversations.list +func (s *Server) ServeConversationsList(w http.ResponseWriter, req *http.Request) { + var limit int + limitStr := req.FormValue("limit") + var err error + if limitStr != "" { + limit, err = strconv.Atoi(limitStr) + if err != nil { + respondWith(w, &response{Err: "invalid_limit"}) + return + } + } + + chans, cur, err := s.API().ConversationsList(req.Context(), ConversationsListOpts{ + Cursor: req.FormValue("cursor"), + Limit: limit, + Types: req.FormValue("types"), + ExcludeArchived: req.FormValue("exclude_archived") == "true", + }) + if respondErr(w, err) { + return + } + + var resp struct { + response + Channels []Channel `json:"channels"` + } + + resp.Meta.Cursor = cur + resp.Channels = chans + resp.OK = true + + respondWith(w, resp) +} diff --git a/devtools/mockslack/conversationslist_test.go b/devtools/mockslack/conversationslist_test.go new file mode 100644 index 0000000000..605495c70c --- /dev/null +++ b/devtools/mockslack/conversationslist_test.go @@ -0,0 +1,67 @@ +package mockslack + +import ( + "context" + "sort" + "testing" +) + +func TestState_ConversationsList(t *testing.T) { + st := newState() + + chans := []ChannelInfo{ + st.NewChannel("foo"), + st.NewChannel("bar"), + st.NewChannel("baz"), + } + sort.Slice(chans, func(i, j int) bool { return chans[i].ID < chans[j].ID }) + + ctx := context.Background() + ch, _, err := st.API().ConversationsList(ctx, ConversationsListOpts{Limit: 1}) + if err == nil { + t.Error("got nil; expected permissions error") + } + + ctx = WithToken(ctx, &AuthToken{Scopes: 
[]string{"channels:read"}}) + + check := func(idx int) { + t.Helper() + if len(ch) != 1 { + t.Fatalf("got len=%d; want 1", len(ch)) + } + if ch[0].ID != chans[idx].ID { + t.Errorf("ID[%d]=%s; want %s", idx, ch[0].ID, chans[idx].ID) + } + if ch[0].Name != chans[idx].Name { + t.Errorf("Name[%d]=%s; want %s", idx, ch[0].Name, chans[idx].Name) + } + } + + ch, cur, err := st.API().ConversationsList(ctx, ConversationsListOpts{Limit: 1}) + if err != nil { + t.Errorf("err=%v; expected nil", err) + } + if cur == "" { + t.Errorf("got empty cursor; expected next page") + } + check(0) + + ch, cur, err = st.API().ConversationsList(ctx, ConversationsListOpts{Limit: 1, Cursor: cur}) + if err != nil { + t.Errorf("err=%v; expected nil", err) + } + if cur == "" { + t.Errorf("got empty cursor; expected next page") + } + check(1) + + ch, cur, err = st.API().ConversationsList(ctx, ConversationsListOpts{Limit: 1, Cursor: cur}) + if err != nil { + t.Errorf("err=%v; expected nil", err) + } + if cur != "" { + t.Errorf("cursor=%s; expected empty", cur) + } + check(2) + +} diff --git a/devtools/mockslack/groupscreate.go b/devtools/mockslack/groupscreate.go new file mode 100644 index 0000000000..f6a5132c63 --- /dev/null +++ b/devtools/mockslack/groupscreate.go @@ -0,0 +1,53 @@ +package mockslack + +import ( + "context" + "net/http" +) + +// GroupsCreate is used to create a channel. +func (st *API) GroupsCreate(ctx context.Context, opts ConversationCreateOpts) (*Channel, error) { + err := checkPermission(ctx, "groups:write") + if err != nil { + return nil, err + } + + if !opts.Validate { + opts.Name = cleanChannelName(opts.Name) + } + err = validateChannelName(opts.Name) + if err != nil { + return nil, err + } + + ch := Channel{ + ID: st.gen.GroupID(), + Name: opts.Name, + IsGroup: true, + } + + st.mx.Lock() + st.channels[ch.ID] = &channelState{Channel: ch} + st.mx.Unlock() + + return &ch, nil +} + +// ServeGroupsCreate serves a request to the `Groups.create` API call. 
+// +// https://api.slack.com/methods/Groups.create +func (s *Server) ServeGroupsCreate(w http.ResponseWriter, req *http.Request) { + ch, err := s.API().GroupsCreate(req.Context(), ConversationCreateOpts{Name: req.FormValue("name"), Validate: req.FormValue("validate") == "true"}) + if respondErr(w, err) { + return + } + + var resp struct { + response + Group *Channel `json:"group"` + } + resp.OK = true + resp.Group = ch + + respondWith(w, resp) +} diff --git a/devtools/mockslack/http.go b/devtools/mockslack/http.go new file mode 100644 index 0000000000..16332a9793 --- /dev/null +++ b/devtools/mockslack/http.go @@ -0,0 +1,44 @@ +package mockslack + +import ( + "encoding/json" + "log" + "net/http" +) + +type response struct { + OK bool `json:"ok"` + Err string `json:"error,omitempty"` + Meta struct { + Cursor string `json:"next_cursor,omitempty"` + } `json:"response_metadata,omitempty"` +} + +func respondWith(w http.ResponseWriter, data interface{}) { + w.Header().Set("content-type", "application/json") + err := json.NewEncoder(w).Encode(data) + if err != nil { + log.Println("ERROR:", err) + } +} + +func respondErr(w http.ResponseWriter, err error) bool { + if err == nil { + return false + } + + respondWith(w, err) + return true +} + +type middlewareFunc func(http.HandlerFunc) http.HandlerFunc + +func middleware(h http.Handler, fns ...middlewareFunc) http.Handler { + + for i := range fns { + // apply in reverse order + h = fns[len(fns)-1-i](h.ServeHTTP) + } + + return h +} diff --git a/devtools/mockslack/login.go b/devtools/mockslack/login.go new file mode 100644 index 0000000000..1a20c76a6c --- /dev/null +++ b/devtools/mockslack/login.go @@ -0,0 +1,110 @@ +package mockslack + +import ( + "html/template" + "log" + "net/http" + "net/url" + "strings" +) + +var loginPage = template.Must( + template.New("login"). + Funcs(template.FuncMap{"StringsJoin": strings.Join}). + Parse(` + + + + + + + Mock Slack - Login + + +
+

Login

+

Select an existing user, or create a new one.

+
+ +
+ {{- range $key, $value := .Data}} + + {{- end}} + + {{range .Users}} +
+ {{end}} +
+ +
+ + + +
+ + +
+ +
+ + +`)) + +func (s *Server) loginMiddleware(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + if strings.HasPrefix(req.URL.Path, "/api") || ContextToken(req.Context()) != nil { + next(w, req) + return + } + + if req.FormValue("action") == "login" { + userID := req.FormValue("userID") + + if userID == "new" { + usr := s.newUser(User{Name: req.FormValue("newUserName")}) + userID = usr.ID + } + + tok := s.newToken(AuthToken{ + User: userID, + Scopes: []string{"user"}, + }) + + http.SetCookie(w, &http.Cookie{ + Name: TokenCookieName, + Value: tok.ID, + Path: "/", + }) + + next(w, req.WithContext(WithToken(req.Context(), tok))) + return + } + + var renderContext struct { + Users []User + Data url.Values + } + + renderContext.Data = req.Form + + // remove used fields, if they existed + renderContext.Data.Del("userID") + renderContext.Data.Del("newUserName") + renderContext.Data.Del("action") + + // show login + err := loginPage.Execute(w, renderContext) + if err != nil { + log.Println("ERROR:", err) + } + } +} diff --git a/devtools/mockslack/oauthaccess.go b/devtools/mockslack/oauthaccess.go new file mode 100644 index 0000000000..4ff1d465f7 --- /dev/null +++ b/devtools/mockslack/oauthaccess.go @@ -0,0 +1,57 @@ +package mockslack + +import ( + "context" + "net/http" +) + +// OAuthAccessOpts contains parameters for an OAuthAccess API call. +type OAuthAccessOpts struct { + ClientID string + ClientSecret string + Code string +} + +// OAuthAccess will exchange a temporary code for an access token. 
+func (st *API) OAuthAccess(ctx context.Context, opts OAuthAccessOpts) (*AuthToken, error) { + st.mx.Lock() + defer st.mx.Unlock() + + app := st.apps[opts.ClientID] + if app == nil { + return nil, &response{Err: "invalid_client_id"} + } + + if app.Secret != opts.ClientSecret { + return nil, &response{Err: "bad_client_secret"} + } + + tok := st.tokenCodes[opts.Code] + if tok == nil || tok.ClientID != opts.ClientID { + return nil, &response{Err: "invalid_code"} + } + + delete(st.tokenCodes, opts.Code) + + return tok.AuthToken, nil +} + +// ServeOAuthAccess serves a request to the `oauth.access` API call. +// +// https://api.slack.com/methods/oauth.access +func (s *Server) ServeOAuthAccess(w http.ResponseWriter, req *http.Request) { + usr, pass, _ := req.BasicAuth() + tok, err := s.API().OAuthAccess(req.Context(), OAuthAccessOpts{ClientID: usr, ClientSecret: pass, Code: req.FormValue("code")}) + if respondErr(w, err) { + return + } + + var resp struct { + AccessToken string `json:"access_token"` + UserID string `json:"user_id"` + } + resp.AccessToken = tok.ID + resp.UserID = tok.User + + respondWith(w, resp) +} diff --git a/devtools/mockslack/oauthauthorize.go b/devtools/mockslack/oauthauthorize.go new file mode 100644 index 0000000000..28639f90c1 --- /dev/null +++ b/devtools/mockslack/oauthauthorize.go @@ -0,0 +1,110 @@ +package mockslack + +import ( + "html/template" + "log" + "net/http" + "net/url" + "strings" +) + +var authPage = template.Must( + template.New("authorize"). + Funcs(template.FuncMap{"StringsJoin": strings.Join}). + Parse(` + + + + + + + Mock Slack - Authorize + + +
+

Authorize

+

Logged in as: {{.UserName}}

+

Allow the application {{.AppName}} access to the following scopes:

+
    + {{range .Scopes}} +
  • {{.}}
  • + {{end}} +
+
+
+ {{- range $key, $value := .Data}} + + {{- end}} + + + + +
+ +
+ + +`)) + +func (s *Server) ServeOAuthAuthorize(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + if respondErr(w, checkPermission(ctx, "user")) { + return + } + + clientID := req.FormValue("client_id") + var renderData struct { + AppName string + UserName string + Scopes []string + Data url.Values + } + renderData.Data = req.Form + + redir, err := url.Parse(req.FormValue("redirect_uri")) + if err != nil { + respondWith(w, &response{Err: "bad_redirect_uri"}) + return + } + + errResp := func(msg string) { + q := redir.Query() + q.Set("state", req.FormValue("state")) + q.Set("error", msg) + redir.RawQuery = q.Encode() + http.Redirect(w, req, redir.String(), http.StatusFound) + } + + app := s.app(clientID) + if app == nil { + errResp("invalid_client_id") + return + } + renderData.AppName = app.Name + + if req.FormValue("action") == "cancel" { + errResp("access_denied") + return + } + + uid := userID(ctx) + renderData.UserName = s.user(uid).Name + scopes := strings.Split(req.FormValue("scope"), " ") + renderData.Scopes = scopes + if req.FormValue("action") != "confirm" { + err = authPage.Execute(w, renderData) + if err != nil { + log.Println("ERROR:", err) + } + return + } + + code := s.addUserAppScope(uid, clientID, scopes...) + + q := redir.Query() + q.Del("error") + q.Set("code", code) + q.Set("state", req.FormValue("state")) + redir.RawQuery = q.Encode() + http.Redirect(w, req, redir.String(), http.StatusFound) +} diff --git a/devtools/mockslack/permissions.go b/devtools/mockslack/permissions.go new file mode 100644 index 0000000000..f68a322b31 --- /dev/null +++ b/devtools/mockslack/permissions.go @@ -0,0 +1,123 @@ +package mockslack + +import ( + "context" + "net/http" + "strings" +) + +type contextKey int + +const ( + contextKeyToken contextKey = iota +) + +// AuthToken represents a state of authorization with the Slack server. 
+type AuthToken struct { + ID string + Scopes []string + User string + IsBot bool +} + +// WithToken will return a new context authorized for API calls with the given AuthToken. +func WithToken(ctx context.Context, tok *AuthToken) context.Context { + if tok == nil { + return ctx + } + cpy := *tok + cpy.Scopes = make([]string, len(tok.Scopes)) + copy(cpy.Scopes, tok.Scopes) + return context.WithValue(ctx, contextKeyToken, cpy) +} + +// ContextToken will return a copy of the AuthToken from the given context. +func ContextToken(ctx context.Context) *AuthToken { + tok, ok := ctx.Value(contextKeyToken).(AuthToken) + if !ok { + return nil + } + return &tok +} + +func tokenID(ctx context.Context) string { + tok := ContextToken(ctx) + if tok == nil { + return "" + } + return tok.ID +} +func userID(ctx context.Context) string { + tok := ContextToken(ctx) + if tok == nil || tok.IsBot { + return "" + } + + return tok.User +} +func botID(ctx context.Context) string { + tok := ContextToken(ctx) + if tok == nil || !tok.IsBot { + return "" + } + + return tok.User +} + +type scopeError struct { + response + Needed string `json:"needed"` + Provided string `json:"provided"` +} + +func (r response) Error() string { return r.Err } + +func hasScope(ctx context.Context, scopes ...string) bool { + return ContextToken(ctx).hasScope(scopes...) +} + +func (tok *AuthToken) hasScope(scopes ...string) bool { + if tok == nil { + return false + } + for _, scope := range scopes { + for _, tokenScope := range tok.Scopes { + if scope == tokenScope { + return true + } + } + } + + return false +} + +func checkPermission(ctx context.Context, scopes ...string) error { + tok := ContextToken(ctx) + if tok == nil { + return response{Err: "not_authed"} + } + + if len(scopes) == 0 || tok.hasScope(scopes...) 
{ + return nil + } + + return &scopeError{ + Needed: strings.Join(scopes, ","), + Provided: strings.Join(tok.Scopes, ","), + response: response{Err: "missing_scope"}, + } +} + +func (s *Server) tokenMiddleware(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + tok := s.token(req.FormValue("token")) + if tok == nil && strings.HasPrefix(req.Header.Get("Authorization"), "Bearer ") { + tok = s.token(strings.TrimPrefix(req.Header.Get("Authorization"), "Bearer ")) + } + if c, _ := req.Cookie(TokenCookieName); tok == nil && c != nil { + tok = s.token(c.Value) + } + + next(w, req.WithContext(WithToken(req.Context(), tok))) + } +} diff --git a/devtools/mockslack/server.go b/devtools/mockslack/server.go new file mode 100644 index 0000000000..9b20bf6682 --- /dev/null +++ b/devtools/mockslack/server.go @@ -0,0 +1,204 @@ +package mockslack + +import ( + "encoding/json" + "log" + "net/http" + + "github.com/davecgh/go-spew/spew" + "github.com/pkg/errors" +) + +// Server implements a mock Slack API. +type Server struct { + *state + + mux *http.ServeMux + + handler http.Handler +} + +// NewServer creates a new blank Server. 
+func NewServer() *Server { + srv := &Server{ + mux: http.NewServeMux(), + state: newState(), + } + + srv.mux.HandleFunc("/api/chat.postMessage", srv.ServeChatPostMessage) + srv.mux.HandleFunc("/api/conversations.info", srv.ServeConversationsInfo) + srv.mux.HandleFunc("/api/conversations.list", srv.ServeConversationsList) + srv.mux.HandleFunc("/api/users.conversations", srv.ServeConversationsList) // same data + srv.mux.HandleFunc("/api/oauth.access", srv.ServeOAuthAccess) + srv.mux.HandleFunc("/api/auth.revoke", srv.ServeAuthRevoke) + srv.mux.HandleFunc("/api/channels.create", srv.ServeChannelsCreate) + srv.mux.HandleFunc("/api/groups.create", srv.ServeGroupsCreate) + // TODO: history, leave, join + srv.mux.HandleFunc("/oauth/authorize", srv.ServeOAuthAuthorize) + + srv.mux.HandleFunc("/stats", func(w http.ResponseWriter, req *http.Request) { + srv.state.mx.Lock() + defer srv.state.mx.Unlock() + spew.Fdump(w) + }) + + // handle 404/unknown api methods + srv.mux.HandleFunc("/api/", func(w http.ResponseWriter, req *http.Request) { + err := json.NewEncoder(w).Encode(response{Err: "unknown_method"}) + if err != nil { + log.Println("ERROR:", err) + } + }) + + srv.mux.HandleFunc("/state", func(w http.ResponseWriter, req *http.Request) { + srv.state.mx.Lock() + defer srv.state.mx.Unlock() + spew.Fdump(w, srv.state) + }) + + srv.handler = middleware(srv.mux, + srv.tokenMiddleware, + srv.loginMiddleware, + ) + + return srv +} + +// TokenCookieName is the name of a cookie containing a token for a user session. +const TokenCookieName = "slack_token" + +// AppInfo contains information for an installed Slack app. +type AppInfo struct { + Name string + ClientID string + ClientSecret string + AccessToken string +} + +// InstallApp will "install" a new app to this Slack server using pre-configured AppInfo. 
+func (st *state) InstallStaticApp(app AppInfo, scopes ...string) (*AppInfo, error) { + st.mx.Lock() + defer st.mx.Unlock() + + if app.ClientID == "" { + app.ClientID = st.gen.ClientID() + } + if app.ClientSecret == "" { + app.ClientSecret = st.gen.ClientSecret() + } + if app.AccessToken == "" { + app.AccessToken = st.gen.UserAccessToken() + } + + if !clientIDRx.MatchString(app.ClientID) { + return nil, errors.Errorf("invalid client ID format: %s", app.ClientID) + } + if !clientSecretRx.MatchString(app.ClientSecret) { + return nil, errors.Errorf("invalid client secret format: %s", app.ClientSecret) + } + if !userAccessTokenRx.MatchString(app.AccessToken) { + return nil, errors.Errorf("invalid access token format: %s", app.AccessToken) + } + + for _, scope := range scopes { + if !scopeRx.MatchString(scope) { + panic("invalid scope format: " + scope) + } + } + + tok := &AuthToken{ + ID: app.AccessToken, + Scopes: scopes, + User: app.ClientID, + } + + st.tokens[tok.ID] = tok + st.apps[tok.User] = &appState{ + App: App{ + ID: app.ClientID, + Name: app.Name, + Secret: app.ClientSecret, + AuthToken: tok, + }, + } + + return &app, nil +} + +// InstallApp will "install" a new app to this Slack server. +func (st *state) InstallApp(name string, scopes ...string) AppInfo { + app, err := st.InstallStaticApp(AppInfo{Name: name}, scopes...) + if err != nil { + // should not happen, since empty values are generated + panic(err) + } + return *app +} + +// UserInfo contains information for a newly created user. +type UserInfo struct { + ID string + Name string + AuthToken string +} + +// NewUser will create a new Slack user with the given name. +func (st *state) NewUser(name string) UserInfo { + usr := st.newUser(User{Name: name}) + tok := st.newToken(AuthToken{ + User: usr.ID, + Scopes: []string{"user"}, + }) + + return UserInfo{ + ID: usr.ID, + Name: usr.Name, + AuthToken: tok.ID, + } +} + +// ChannelInfo contains information about a newly created Slack channel. 
+type ChannelInfo struct { + ID, Name string +} + +// NewChannel will create a new Slack channel with the given name. +func (st *state) NewChannel(name string) ChannelInfo { + info := ChannelInfo{ + ID: st.gen.ChannelID(), + Name: name, + } + + st.mx.Lock() + st.channels[info.ID] = &channelState{Channel: Channel{ + ID: info.ID, + Name: info.Name, + IsChannel: true, + }} + st.mx.Unlock() + + return info +} + +// Messages will return all messages from a given channel/group. +func (st *state) Messages(chanID string) []Message { + st.mx.Lock() + defer st.mx.Unlock() + ch := st.channels[chanID] + if ch == nil { + return nil + } + + result := make([]Message, len(ch.Messages)) + for i, msg := range ch.Messages { + result[i] = *msg + } + + return result +} + +// ServeHTTP serves the Slack API. +func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + log.Printf("%s %s", req.Method, req.URL.Path) + s.handler.ServeHTTP(w, req) +} diff --git a/devtools/mockslack/state.go b/devtools/mockslack/state.go new file mode 100644 index 0000000000..04ff770d38 --- /dev/null +++ b/devtools/mockslack/state.go @@ -0,0 +1,92 @@ +package mockslack + +import ( + "sync" + "time" +) + +type state struct { + mx sync.Mutex + + gen *idGen + + flags struct { + autoCreateChannel bool + } + + apps map[string]*appState + channels map[string]*channelState + tokens map[string]*AuthToken + users map[string]*userState + tokenCodes map[string]*tokenCode +} + +func newState() *state { + return &state{ + gen: newIDGen(), + apps: make(map[string]*appState), + channels: make(map[string]*channelState), + tokens: make(map[string]*AuthToken), + users: make(map[string]*userState), + tokenCodes: make(map[string]*tokenCode), + } +} + +type tokenCode struct { + ClientID string + *AuthToken +} + +type appState struct { + App +} +type App struct { + ID string + Name string + Secret string + AuthToken *AuthToken +} + +type channelState struct { + Channel + + TS time.Time + Users []string + Messages 
[]*Message +} + +// SetAutoCreateChannel, if set to true, will cause messages sent to +// non-existant channels to succeed by creating the channel automatically. +func (st *state) SetAutoCreateChannel(value bool) { + st.mx.Lock() + defer st.mx.Unlock() + + st.flags.autoCreateChannel = value +} + +func (st *state) token(id string) *AuthToken { + st.mx.Lock() + defer st.mx.Unlock() + + return st.tokens[id] +} +func (st *state) app(id string) *appState { + st.mx.Lock() + defer st.mx.Unlock() + + return st.apps[id] +} + +func (st *state) newToken(a AuthToken) *AuthToken { + st.mx.Lock() + defer st.mx.Unlock() + if a.ID == "" { + if a.IsBot { + a.ID = st.gen.BotAccessToken() + } else { + a.ID = st.gen.UserAccessToken() + } + } + st.tokens[a.ID] = &a + return &a +} diff --git a/devtools/mockslack/user.go b/devtools/mockslack/user.go new file mode 100644 index 0000000000..bd499a9ac0 --- /dev/null +++ b/devtools/mockslack/user.go @@ -0,0 +1,55 @@ +package mockslack + +type User struct { + ID string + Name string +} +type userState struct { + User + appTokens map[string]*AuthToken +} + +func (st *state) user(id string) *userState { + st.mx.Lock() + defer st.mx.Unlock() + + return st.users[id] +} +func (st *state) newUser(u User) User { + st.mx.Lock() + defer st.mx.Unlock() + + if u.ID == "" { + u.ID = st.gen.UserID() + } + st.users[u.ID] = &userState{User: u, appTokens: make(map[string]*AuthToken)} + + return u +} + +func (st *state) addUserAppScope(userID, clientID string, scopes ...string) string { + st.mx.Lock() + defer st.mx.Unlock() + + if st.users[userID].appTokens[clientID] == nil { + tok := &AuthToken{ID: st.gen.UserAccessToken(), User: userID, Scopes: scopes} + st.tokens[tok.ID] = tok + st.users[userID].appTokens[clientID] = tok + + code := st.gen.TokenCode() + st.tokenCodes[code] = &tokenCode{AuthToken: tok, ClientID: clientID} + return code + } + + tok := st.users[userID].appTokens[clientID] + + for _, scope := range scopes { + if !contains(tok.Scopes, scope) { + 
tok.Scopes = append(tok.Scopes, scope) + } + } + + code := st.gen.TokenCode() + st.tokenCodes[code] = &tokenCode{AuthToken: tok, ClientID: clientID} + return code +} diff --git a/devtools/mockslack/util.go b/devtools/mockslack/util.go new file mode 100644 index 0000000000..54276a8991 --- /dev/null +++ b/devtools/mockslack/util.go @@ -0,0 +1,97 @@ +package mockslack + +import ( + "encoding/hex" + "fmt" + "math/rand" + "sync" + "time" +) + +const ( + idChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + botChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" +) + +type idGen struct { + ts time.Time + mx sync.Mutex + tok map[string]struct{} +} + +func newIDGen() *idGen { + return &idGen{ + ts: time.Now(), + tok: make(map[string]struct{}), + } +} + +func genID(s string, n int) string { + buf := make([]byte, n) + l := len(s) + for i := range buf { + buf[i] = s[rand.Intn(l)] + } + return string(buf) +} + +func genHex(n int) string { + buf := make([]byte, n) + rand.Read(buf) + return hex.EncodeToString(buf) +} +func timeString(t time.Time) string { return fmt.Sprintf("%d", t.UnixNano()/1000000)[1:] } + +func (gen *idGen) next(fn func() string) string { + for { + id := fn() + gen.mx.Lock() + if _, ok := gen.tok[id]; !ok { + gen.tok[id] = struct{}{} + gen.mx.Unlock() + return id + } + gen.mx.Unlock() + } +} + +func (gen *idGen) ID(p string) string { + return gen.next(func() string { return p + genID(idChars, 8) }) +} +func (gen *idGen) UserID() string { return gen.ID("W") } +func (gen *idGen) AppID() string { return gen.ID("A") } +func (gen *idGen) ChannelID() string { return gen.ID("D") } +func (gen *idGen) GroupID() string { return gen.ID("G") } +func (gen *idGen) ClientSecret() string { return gen.next(func() string { return genHex(16) }) } +func (gen *idGen) SigningSecret() string { return gen.next(func() string { return genHex(16) }) } + +func (gen *idGen) TokenCode() string { + return gen.next(func() string { + return fmt.Sprintf("%s.%s.%s", 
timeString(gen.ts), timeString(time.Now().AddDate(2, 0, 0)), genHex(32))
+	})
+}
+
+func (gen *idGen) ClientID() string {
+	return gen.next(func() string { return timeString(gen.ts) + "." + timeString(time.Now()) })
+}
+
+func (gen *idGen) UserAccessToken() string {
+	return gen.next(func() string {
+		return fmt.Sprintf("xoxp-%s-%s-%s-%s", timeString(gen.ts), timeString(time.Now()), timeString(time.Now().AddDate(1, 0, 0)), genHex(16))
+	})
+}
+
+func (gen *idGen) BotAccessToken() string {
+	return gen.next(func() string {
+		return fmt.Sprintf("xoxb-%s-%s-%s", timeString(gen.ts), timeString(time.Now()), genID(botChars, 24))
+	})
+}
+
+func contains(strs []string, val string) bool {
+	for _, s := range strs {
+		if val == s {
+			return true
+		}
+	}
+	return false
+}
diff --git a/devtools/mockslack/validate.go b/devtools/mockslack/validate.go
new file mode 100644
index 0000000000..61ed297904
--- /dev/null
+++ b/devtools/mockslack/validate.go
@@ -0,0 +1,12 @@
+package mockslack
+
+import (
+	"regexp"
+)
+
+var (
+	scopeRx           = regexp.MustCompile(`^[a-z]+(:[a-z]+)*$`)
+	clientIDRx        = regexp.MustCompile(`^[0-9]{12}\.[0-9]{12}$`)
+	clientSecretRx    = regexp.MustCompile(`^[a-z0-9]{32}$`)
+	userAccessTokenRx = regexp.MustCompile(`^xoxp-[0-9]{12}-[0-9]{12}-[0-9]{12}-[a-z0-9]{32}$`)
+)
diff --git a/devtools/mocktwilio/server.go b/devtools/mocktwilio/server.go
new file mode 100644
index 0000000000..fa1ceaf77c
--- /dev/null
+++ b/devtools/mocktwilio/server.go
@@ -0,0 +1,213 @@
+package mocktwilio
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/target/goalert/notification/twilio"
+	"github.com/target/goalert/validation/validate"
+)
+
+// Config is used to configure the mock server.
+type Config struct {
+
+	// The SID and token should match values given to the backend
+	// as the mock server will send and validate signatures.
+ AccountSID string + AuthToken string + + // MinQueueTime determines the minimum amount of time an SMS or voice + // call will sit in the queue before being processed/delivered. + MinQueueTime time.Duration +} + +// Server implements the Twilio API for SMS and Voice calls +// via the http.Handler interface. +type Server struct { + mx sync.RWMutex + callbacks map[string]string + + smsCh chan *SMS + callCh chan *VoiceCall + + errs chan error + + cfg Config + + messages map[string]*SMS + calls map[string]*VoiceCall + + mux *http.ServeMux + + sidSeq uint64 +} + +// NewServer creates a new Server. +func NewServer(cfg Config) *Server { + if cfg.MinQueueTime == 0 { + cfg.MinQueueTime = 100 * time.Millisecond + } + s := &Server{ + cfg: cfg, + callbacks: make(map[string]string), + mux: http.NewServeMux(), + messages: make(map[string]*SMS), + calls: make(map[string]*VoiceCall), + smsCh: make(chan *SMS), + callCh: make(chan *VoiceCall), + errs: make(chan error, 10000), + } + + base := "/Accounts/" + cfg.AccountSID + + s.mux.HandleFunc(base+"/Calls.json", s.serveNewCall) + s.mux.HandleFunc(base+"/Messages.json", s.serveNewMessage) + s.mux.HandleFunc(base+"/Calls/", s.serveCallStatus) + s.mux.HandleFunc(base+"/Messages/", s.serveMessageStatus) + + go s.loop() + return s +} + +// Errors returns a channel that gets fed all errors when calling +// the backend. 
+func (s *Server) Errors() chan error { + return s.errs +} + +func (s *Server) post(url string, v url.Values) ([]byte, error) { + req, err := http.NewRequest("POST", url, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("X-Twilio-Signature", string(twilio.Signature(s.cfg.AuthToken, url, v))) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode/100 != 2 { + return nil, errors.Errorf("non-2xx response: %s", resp.Status) + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if len(data) == 0 && resp.StatusCode != 204 { + return nil, errors.Errorf("non-204 response on empty body: %s", resp.Status) + } + + return data, nil +} + +func (s *Server) processMessages() { + s.mx.Lock() + for _, sms := range s.messages { + if time.Since(sms.start) < s.cfg.MinQueueTime { + continue + } + switch sms.msg.Status { + case twilio.MessageStatusAccepted: + defer sms.updateStatus(twilio.MessageStatusQueued) + case twilio.MessageStatusQueued: + // move to sending once it's been pulled from the channel + select { + case s.smsCh <- sms: + sms.msg.Status = twilio.MessageStatusSending + default: + } + } + } + s.mx.Unlock() +} +func (s *Server) id(prefix string) string { + return fmt.Sprintf("%s%032d", prefix, atomic.AddUint64(&s.sidSeq, 1)) +} +func (s *Server) processCalls() { + for _, vc := range s.calls { + if time.Since(vc.start) < s.cfg.MinQueueTime { + continue + } + switch vc.call.Status { + case twilio.CallStatusQueued: + vc.updateStatus(twilio.CallStatusInitiated) + case twilio.CallStatusInitiated: + // move to ringing once it's been pulled from the channel + s.mx.Lock() + select { + case s.callCh <- vc: + vc.call.Status = twilio.CallStatusRinging + default: + } + s.mx.Unlock() + case twilio.CallStatusInProgress: + s.mx.Lock() + if vc.hangup || vc.needsProcessing { 
+ select { + case s.callCh <- vc: + vc.needsProcessing = false + if vc.hangup { + vc.call.Status = twilio.CallStatusCompleted + } + default: + } + } + s.mx.Unlock() + } + } +} +func (s *Server) loop() { + sendT := time.NewTicker(10 * time.Millisecond) + for range sendT.C { + s.processMessages() + s.processCalls() + } +} + +func apiError(status int, w http.ResponseWriter, e *twilio.Exception) { + w.WriteHeader(status) + err := json.NewEncoder(w).Encode(e) + if err != nil { + panic(err) + } +} + +// ServeHTTP implements the http.Handler interface for serving [mock] API requests. +func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.mux.ServeHTTP(w, req) +} + +// RegisterSMSCallback will set/update a callback URL for SMS calls made to the given number. +func (s *Server) RegisterSMSCallback(number, url string) error { + err := validate.URL("URL", url) + if err != nil { + return err + } + s.mx.Lock() + defer s.mx.Unlock() + s.callbacks["SMS:"+number] = url + return nil +} + +// RegisterVoiceCallback will set/update a callback URL for voice calls made to the given number. 
+func (s *Server) RegisterVoiceCallback(number, url string) error {
+	err := validate.URL("URL", url)
+	if err != nil {
+		return err
+	}
+	s.mx.Lock()
+	defer s.mx.Unlock()
+	s.callbacks["VOICE:"+number] = url
+	return nil
+}
diff --git a/devtools/mocktwilio/sms.go b/devtools/mocktwilio/sms.go
new file mode 100644
index 0000000000..48d4e1f49d
--- /dev/null
+++ b/devtools/mocktwilio/sms.go
@@ -0,0 +1,170 @@
+package mocktwilio
+
+import (
+	"encoding/json"
+	"github.com/target/goalert/notification/twilio"
+	"github.com/target/goalert/validation/validate"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+func (s *Server) serveNewMessage(w http.ResponseWriter, req *http.Request) {
+	if req.Method != "POST" {
+		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
+		return
+	}
+	var sms SMS
+	sms.msg.From = req.FormValue("From")
+	if s.callbacks["SMS:"+sms.msg.From] == "" {
+		apiError(400, w, &twilio.Exception{
+			Code:    21606,
+			Message: `The "From" phone number provided is not a valid, SMS-capable inbound phone number for your account.`,
+		})
+		return
+	}
+	sms.msg.To = req.FormValue("To")
+	sms.msg.SID = s.id("SM")
+	sms.msg.Status = twilio.MessageStatusAccepted
+	sms.callbackURL = req.FormValue("StatusCallback")
+	sms.start = time.Now()
+	sms.s = s
+	err := validate.URL("StatusCallback", sms.callbackURL)
+	if err != nil {
+		apiError(400, w, &twilio.Exception{
+			Code:    11100,
+			Message: err.Error(),
+		})
+		// must stop here; falling through would write a second (201) response below
+		return
+	}
+
+	sms.body = req.FormValue("Body")
+
+	s.mx.Lock()
+	s.messages[sms.msg.SID] = &sms
+	s.mx.Unlock()
+
+	w.WriteHeader(201)
+	err = json.NewEncoder(w).Encode(sms.msg)
+	if err != nil {
+		panic(err)
+	}
+}
+func (s *Server) serveMessageStatus(w http.ResponseWriter, req *http.Request) {
+	id := strings.TrimSuffix(path.Base(req.URL.Path), ".json")
+	var msg twilio.Message
+
+	s.mx.RLock()
+	sms := s.messages[id]
+	if sms != nil {
+		msg = sms.msg // copy while we have the read lock
+	}
+	s.mx.RUnlock()
+
+	if sms == nil {
+		http.NotFound(w, req)
+		return
+	}
+	err := json.NewEncoder(w).Encode(msg)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// SMS represents an SMS message.
+type SMS struct {
+	s           *Server
+	msg         twilio.Message
+	body        string
+	callbackURL string
+	start       time.Time
+}
+
+func (sms *SMS) updateStatus(stat twilio.MessageStatus) {
+	// move to queued
+	sms.s.mx.Lock()
+	sms.msg.Status = stat
+	sms.s.mx.Unlock()
+
+	// attempt post to status callback
+	_, err := sms.s.post(sms.callbackURL, sms.values(false))
+	if err != nil {
+		sms.s.errs <- errors.Wrap(err, "post to SMS status callback")
+	}
+}
+
+func (sms *SMS) values(body bool) url.Values {
+	v := make(url.Values)
+	sms.s.mx.RLock()
+	v.Set("MessageStatus", string(sms.msg.Status))
+	v.Set("MessageSid", sms.msg.SID)
+	v.Set("To", sms.msg.To)
+	v.Set("From", sms.msg.From)
+	if body {
+		v.Set("Body", sms.body)
+	}
+	sms.s.mx.RUnlock()
+	return v
+}
+
+// SMS will return a channel that will be fed incoming SMS messages as they arrive.
+func (s *Server) SMS() chan *SMS {
+	return s.smsCh
+}
+
+// SendSMS will cause an SMS to be sent to the given number with the contents of body.
+//
+// The to parameter must match a value passed to RegisterSMSCallback or an error is returned.
+func (s *Server) SendSMS(from, to, body string) {
+	s.mx.Lock()
+	cbURL := s.callbacks["SMS:"+to]
+	s.mx.Unlock()
+
+	if cbURL == "" {
+		s.errs <- errors.New("unknown/unregistered desination (to) number")
+		// must stop here; there is no URL to post to
+		return
+	}
+
+	v := make(url.Values)
+	v.Set("From", from)
+	v.Set("Body", body)
+
+	_, err := s.post(cbURL, v)
+	if err != nil {
+		s.errs <- err
+	}
+}
+
+// ID will return the unique ID for this SMS.
+func (sms *SMS) ID() string {
+	return sms.msg.SID
+}
+
+// From returns the phone number the SMS was sent from.
+func (sms *SMS) From() string {
+	return sms.msg.From
+}
+
+// To returns the phone number the SMS is being sent to.
+func (sms *SMS) To() string {
+	return sms.msg.To
+}
+
+// Body returns the contents of the SMS message.
+func (sms *SMS) Body() string {
+	return sms.body
+}
+
+// Accept will cause the SMS to be marked as delivered.
+func (sms *SMS) Accept() {
+	sms.updateStatus(twilio.MessageStatusDelivered)
+}
+
+// Reject will cause the SMS to be marked as undelivered (failed).
+func (sms *SMS) Reject() {
+	sms.updateStatus(twilio.MessageStatusFailed)
+}
diff --git a/devtools/mocktwilio/voicecall.go b/devtools/mocktwilio/voicecall.go
new file mode 100644
index 0000000000..a4e1312295
--- /dev/null
+++ b/devtools/mocktwilio/voicecall.go
@@ -0,0 +1,263 @@
+package mocktwilio
+
+import (
+	"encoding/json"
+	"encoding/xml"
+	"net/http"
+	"net/url"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/target/goalert/notification/twilio"
+	"github.com/target/goalert/validation/validate"
+)
+
+// VoiceCall represents a voice call session.
+type VoiceCall struct {
+	s *Server
+
+	call twilio.Call
+
+	// start is used to track when the call was created (entered queue)
+	start time.Time
+
+	// callStart tracks when the call was accepted
+	// and is used to calculate call.CallDuration when completed.
+	callStart       time.Time
+	url             string
+	callbackURL     string
+	callbackEvents  []string
+	message         string
+	needsProcessing bool
+	hangup          bool
+}
+
+func (s *Server) serveCallStatus(w http.ResponseWriter, req *http.Request) {
+	id := strings.TrimSuffix(path.Base(req.URL.Path), ".json")
+	var call twilio.Call
+
+	s.mx.RLock()
+	vc := s.calls[id]
+	if vc != nil {
+		call = vc.call // copy while we have the read lock
+	}
+	s.mx.RUnlock()
+
+	if vc == nil {
+		http.NotFound(w, req)
+		return
+	}
+	err := json.NewEncoder(w).Encode(call)
+	if err != nil {
+		panic(err)
+	}
+
+}
+
+func (s *Server) serveNewCall(w http.ResponseWriter, req *http.Request) {
+	if req.Method != "POST" {
+		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
+		return
+	}
+	var vc VoiceCall
+
+	vc.call.From = req.FormValue("From")
+	if s.callbacks["VOICE:"+vc.call.From] == "" {
+		apiError(400, w, &twilio.Exception{
+			Message: "Wrong from number.",
+		})
+		return
+	}
+	vc.s = s
+	vc.call.To = req.FormValue("To")
+	vc.call.SID = s.id("CA")
+	vc.call.SequenceNumber = new(int)
+	vc.callbackURL = req.FormValue("StatusCallback")
+	err := validate.URL("StatusCallback", vc.callbackURL)
+	if err != nil {
+		apiError(400, w, &twilio.Exception{
+			Code:    11100,
+			Message: err.Error(),
+		})
+		// must stop here; falling through would write a second (201) response below
+		return
+	}
+	vc.url = req.FormValue("Url")
+	err = validate.URL("StatusCallback", vc.url)
+	if err != nil {
+		apiError(400, w, &twilio.Exception{
+			Code:    11100,
+			Message: err.Error(),
+		})
+		// must stop here; falling through would write a second (201) response below
+		return
+	}
+
+	vc.callbackEvents = map[string][]string(req.Form)["StatusCallbackEvent"]
+	vc.callbackEvents = append(vc.callbackEvents, "completed", "failed") // always send completed and failed
+	vc.start = time.Now()
+
+	vc.call.Status = twilio.CallStatusQueued
+
+	s.mx.Lock()
+	s.calls[vc.call.SID] = &vc
+	s.mx.Unlock()
+
+	w.WriteHeader(201)
+	err = json.NewEncoder(w).Encode(vc.call)
+	if err != nil {
+		panic(err)
+	}
+
+}
+
+func (vc *VoiceCall) updateStatus(stat twilio.CallStatus) {
+	// update the call status (and duration bookkeeping) under the lock
+	vc.s.mx.Lock()
+	vc.call.Status = stat
+	if stat == twilio.CallStatusInProgress {
+		vc.callStart = time.Now()
+	}
+	if stat == twilio.CallStatusCompleted {
+		vc.call.CallDuration = time.Since(vc.callStart)
+	}
+	*vc.call.SequenceNumber++
+	vc.s.mx.Unlock()
+	var sendEvent bool
+	evtName := string(stat)
+	if evtName == "in-progress" {
+		evtName = "answered"
+	}
+	for _, e := range vc.callbackEvents {
+		if e == evtName {
+			sendEvent = true
+			break
+		}
+	}
+
+	if !sendEvent {
+		return
+	}
+
+	// attempt post to status callback
+	_, err := vc.s.post(vc.callbackURL, vc.values(""))
+	if err != nil {
+		vc.s.errs <- errors.Wrap(err, "post to call status callback")
+	}
+}
+func (vc *VoiceCall) values(digits string) url.Values {
+	vc.s.mx.RLock()
+	v := make(url.Values)
+	v.Set("CallSid", vc.call.SID)
+	v.Set("CallStatus", string(vc.call.Status))
+	v.Set("To", vc.call.To)
+	v.Set("From", vc.call.From)
+	v.Set("Direction", "outbound-api")
+	v.Set("SequenceNumber", strconv.Itoa(*vc.call.SequenceNumber))
+	if vc.call.Status == twilio.CallStatusCompleted {
+		v.Set("CallDuration", strconv.FormatFloat(vc.call.CallDuration.Seconds(), 'f', 1, 64))
+	}
+
+	if digits != "" {
+		v.Set("Digits", digits)
+	}
+	vc.s.mx.RUnlock()
+
+	return v
+}
+
+// VoiceCalls will return a channel that will be fed VoiceCalls as they arrive.
+func (s *Server) VoiceCalls() chan *VoiceCall {
+	return s.callCh
+}
+
+// Accept will allow a call to move from initiated to "in-progress".
+func (vc *VoiceCall) Accept() {
+	vc.updateStatus(twilio.CallStatusInProgress)
+	vc.PressDigits("")
+}
+
+// Reject will reject a call, moving it to a "failed" state.
+func (vc *VoiceCall) Reject() {
+	vc.updateStatus(twilio.CallStatusFailed)
+}
+
+// Hangup will end the call, setting its state to "completed".
+func (vc *VoiceCall) Hangup() {
+	vc.updateStatus(twilio.CallStatusCompleted)
+}
+
+// PressDigits will re-query for a spoken message with the given digits.
+// +// It also causes the result of Listen() to be blank until a new message is gathered. +func (vc *VoiceCall) PressDigits(digits string) { + data, err := vc.s.post(vc.url, vc.values(digits)) + if err != nil { + vc.s.errs <- err + return + } + type resp struct { + XMLName xml.Name `xml:"Response"` + Say []string `xml:"Say"` + Gather struct { + Action string `xml:"action,attr"` + Say []string `xml:"Say"` + } + RedirectURL string `xml:"Redirect"` + Hangup *struct{} `xml:"Hangup"` + } + var r resp + err = xml.Unmarshal(data, &r) + if err != nil { + vc.s.errs <- errors.Wrap(err, "unmarshal XML voice response") + return + } + + // use data to update callbackURL and/or message + s := append(r.Say, r.Gather.Say...) + vc.s.mx.Lock() + if r.Gather.Action != "" { + vc.url = r.Gather.Action + } + if r.RedirectURL != "" { + vc.needsProcessing = false + // Twilio's own implementation is totally broken with relative URLs, so we assume absolute (since that's all we use as a consequence) + vc.url = r.RedirectURL + } else { + vc.needsProcessing = true + } + if r.Hangup != nil { + vc.hangup = true + } + vc.message = strings.Join(s, "\n") + vc.s.mx.Unlock() + + if r.RedirectURL != "" { + // redirect and get new message + vc.PressDigits("") + } +} + +// ID returns the unique ID of this phone call. +// It is analogus to the Twilio SID of a call. +func (vc *VoiceCall) ID() string { + return vc.call.SID +} + +// To returns the destination phone number. +func (vc *VoiceCall) To() string { + return vc.call.To +} + +// From return the source phone number. +func (vc *VoiceCall) From() string { + return vc.call.From +} + +// Message will return the last spoken message of the call. 
+func (vc *VoiceCall) Message() string { + vc.s.mx.RLock() + defer vc.s.mx.RUnlock() + return vc.message +} diff --git a/devtools/ordermigrations/main.go b/devtools/ordermigrations/main.go new file mode 100644 index 0000000000..53971ccaf9 --- /dev/null +++ b/devtools/ordermigrations/main.go @@ -0,0 +1,60 @@ +package main + +import ( + "flag" + "fmt" + "log" + "os" + "os/exec" + "sort" + "strings" + "time" +) + +const stampFormat = "20060102150405" + +func runCmd(name string, args ...string) string { + cmd := exec.Command(name, args...) + cmd.Stderr = os.Stderr + fmt.Println("+ ", name, strings.Join(args, " ")) + data, err := cmd.Output() + if err != nil { + log.Fatal(err) + } + return strings.TrimSpace(string(data)) +} +func setTime(s string, t time.Time) string { + return s[:19] + t.Format(stampFormat) + s[33:] +} + +func main() { + check := flag.Bool("check", false, "Exit with error status on wrong order, but don't actually rename anything.") + flag.Parse() + runCmd("git", "fetch", "--no-tags", "origin", "+refs/heads/master:") + masterMigrations := strings.Split(runCmd("git", "ls-tree", "-r", "--name-only", "origin/master", "--", "migrate/migrations"), "\n") + newMigrations := strings.Split(runCmd("git", "diff", "--name-only", "origin/master", "--", "migrate/migrations"), "\n") + + sort.Strings(masterMigrations) + sort.Strings(newMigrations) + + if len(newMigrations) == 0 || len(newMigrations) == 1 && newMigrations[0] == "" { + return + } + + if newMigrations[0] > masterMigrations[len(masterMigrations)-1] { + // already in order + return + } + + if *check { + log.Println(newMigrations[0], "<=", masterMigrations[len(masterMigrations)-1]) + log.Fatal("found new migrations before those in master") + } + + t := time.Now().Add(time.Minute) + for _, m := range newMigrations { + runCmd("git", "mv", m, setTime(m, t)) + t = t.Add(time.Minute) + } + +} diff --git a/devtools/resetdb/main.go b/devtools/resetdb/main.go new file mode 100644 index 0000000000..222491bcd1 --- 
/dev/null +++ b/devtools/resetdb/main.go @@ -0,0 +1,533 @@ +package main + +import ( + "context" + "database/sql" + "flag" + "fmt" + "math/rand" + "os" + "sync" + "time" + + "github.com/target/goalert/alert" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/migrate" + "github.com/target/goalert/permission" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation/validate" + + "github.com/brianvoe/gofakeit" + "github.com/lib/pq" + "github.com/pkg/errors" +) + +func main() { + rand := flag.Bool("with-rand-data", false, "Repopulates the DB with random data.") + skipMigrate := flag.Bool("no-migrate", false, "Disables UP migration.") + flag.Parse() + err := doMigrations(skipMigrate) + if err != nil { + log.Log(context.TODO(), err) + os.Exit(1) + } + + if *rand { + err := fillDB() + if err != nil { + fmt.Println("ERROR:", err) + os.Exit(1) + } + } +} +func noErr(ctx context.Context, err error) { + if err == nil { + return + } + + log.Log(ctx, errors.WithStack(err)) + os.Exit(1) +} + +// Constant values for data generation +const ( + UserCount = 1619 // select count(id) from users + CMMax = 7 // select count(id) from user_contact_methods group by user_id order by count desc limit 1 + NRMax = 20 // select count(id) from user_notification_rules group by user_id order by count desc limit 1 + NRCMMax = 11 // select count(id) from user_notification_rules group by user_id,contact_method_id order by count desc limit 1 + EPCount = 371 // select count(id) from escalation_policies + EPMaxStep = 8 // select count(id) from escalation_policy_steps group by escalation_policy_id order by count desc limit 1 + EPMaxAssigned = 19 // select count(id) from escalation_policy_actions group by escalation_policy_step_id order by count desc limit 1 + SvcCount = 397 // select count(id) from services + RotationMaxPart = 64 // 
select count(id) from rotation_participants group by rotation_id order by count desc limit 1 + ScheduleCount = 404 // select count(id) from schedules + AlertClosedCount = 76909 // select count(id) from alerts where status = 'closed' + AlertActiveCount = 2762 // select count(id) from alerts where status = 'triggered' or status = 'active' + RotationCount = 529 // select count(id) from rotations + IntegrationKeyMax = 11 // select count(id) from integration_keys group by service_id order by count desc limit 1 + ScheduleMaxRules = 10 // select count(id) from schedule_rules group by schedule_id order by count desc limit 1 + ScheduleMaxOverrides = 10 +) + +var ( + genRecords int + genTables int +) + +type intGen struct { + m map[int]bool + mx sync.Mutex +} + +func newIntGen() *intGen { + return &intGen{ + m: make(map[int]bool), + } +} +func (g *intGen) Gen(n int) int { + g.mx.Lock() + defer g.mx.Unlock() + for { + value := rand.Intn(n) + if g.m[value] { + continue + } + g.m[value] = true + return value + } +} + +type gen struct { + m map[string]bool + mx sync.Mutex +} + +func newGen() *gen { + return &gen{ + m: make(map[string]bool), + } +} +func (g *gen) PickOne(s []string) string { + return g.Gen(func() string { return pickOneStr(s) }) +} +func (g *gen) Gen(fn func() string) string { + g.mx.Lock() + defer g.mx.Unlock() + for { + value := fn() + if g.m[value] { + continue + } + g.m[value] = true + return value + } +} + +func idName(suffix string) func() string { + return func() string { + var res string + for { + res = fmt.Sprintf("%s %s %s %s", gofakeit.JobDescriptor(), gofakeit.BuzzWord(), gofakeit.JobLevel(), suffix) + err := validate.IDName("", res) + if err == nil { + return res + } + } + } +} + +func pickOneStr(s []string) string { + return s[rand.Intn(len(s))] +} + +type table struct { + ctx context.Context + stmt *sql.Stmt + n int + name string + s time.Time +} + +func NewTable(ctx context.Context, tx *sql.Tx, name string, cols []string) *table { + stmt, err := 
tx.Prepare(pq.CopyIn(name, cols...)) + noErr(ctx, err) + return &table{ctx: ctx, stmt: stmt, name: name, s: time.Now()} +} +func (t *table) Close() { + noErr(t.ctx, t.stmt.Close()) + fmt.Printf("%s: %d records in %s\n", t.name, t.n, time.Since(t.s).String()) + genRecords += t.n + genTables++ +} +func (t *table) Insert(args ...interface{}) { + t.n++ + _, err := t.stmt.Exec(args...) + noErr(t.ctx, err) +} + +func fillDB() error { + db, err := openDB() + if err != nil { + return errors.Wrap(err, "open DB") + } + defer db.Close() + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + ctx = permission.SystemContext(ctx, "resetdb") + start := time.Now() + tx, err := db.BeginTx(ctx, nil) + noErr(ctx, err) + + defer tx.Rollback() + + users := NewTable(ctx, tx, "users", []string{"id", "name", "role", "email"}) + usrGen := newGen() + var userIDs []string + for i := 0; i < UserCount; i++ { + id := gofakeit.UUID() + userIDs = append(userIDs, id) + users.Insert(id, usrGen.Gen(gofakeit.Name), permission.RoleUser, usrGen.Gen(gofakeit.Email)) + } + users.Close() + + p := 0 + phone := func() string { + p++ + return fmt.Sprintf("+17633%06d", p) + } + var nRules [][]interface{} + cm := NewTable(ctx, tx, "user_contact_methods", []string{"id", "name", "value", "user_id", "type", "disabled"}) + for _, userID := range userIDs { + gen := newGen() + typ := contactmethod.TypeSMS + if gofakeit.Bool() { + typ = contactmethod.TypeVoice + } + n := rand.Intn(CMMax) + var cmIDs []string + for i := 0; i < n; i++ { + id := gofakeit.UUID() + cm.Insert(id, gen.Gen(gofakeit.FirstName), phone(), userID, typ, true) + cmIDs = append(cmIDs, id) + } + nr := 0 + nrTotal := rand.Intn(NRMax) + for _, cmID := range cmIDs { + nrGen := newIntGen() + n := rand.Intn(NRCMMax) + nr + for ; nr <= n && nr <= nrTotal; nr++ { + nRules = append(nRules, []interface{}{gofakeit.UUID(), nrGen.Gen(60), cmID, userID}) + } + } + } + cm.Close() + + nr := NewTable(ctx, tx, 
"user_notification_rules", []string{"id", "delay_minutes", "contact_method_id", "user_id"}) + for _, rules := range nRules { + nr.Insert(rules...) + } + nr.Close() + + zones := []string{"America/Chicago", "Europe/Berlin", "UTC"} + rotTypes := []rotation.Type{rotation.TypeDaily, rotation.TypeHourly, rotation.TypeWeekly} + + rotGen := newGen() + var rotationIDs []string + rot := NewTable(ctx, tx, "rotations", []string{"id", "name", "description", "time_zone", "shift_length", "start_time", "type"}) + for i := 0; i < RotationCount; i++ { + id := gofakeit.UUID() + rot.Insert( + id, + rotGen.Gen(idName("Rotation")), + gofakeit.Sentence(rand.Intn(10)+3), + zones[rand.Intn(len(zones))], + rand.Intn(14)+1, + gofakeit.DateRange(time.Now().AddDate(-3, 0, 0), time.Now()), + rotTypes[rand.Intn(len(rotTypes))], + ) + rotationIDs = append(rotationIDs, id) + } + rot.Close() + + rPart := NewTable(ctx, tx, "rotation_participants", []string{"id", "rotation_id", "position", "user_id"}) + for _, rotID := range rotationIDs { + n := rand.Intn(RotationMaxPart) + for i := 0; i < n; i++ { + rPart.Insert(gofakeit.UUID(), rotID, i, pickOneStr(userIDs)) //duplicates ok + } + } + rPart.Close() + + schedGen := newGen() + sc := NewTable(ctx, tx, "schedules", []string{"id", "name", "description", "time_zone"}) + var scheduleIDs []string + for i := 0; i < ScheduleCount; i++ { + id := gofakeit.UUID() + sc.Insert(id, + schedGen.Gen(idName("Schedule")), + gofakeit.Sentence(rand.Intn(10)+3), + zones[rand.Intn(len(zones))], + ) + scheduleIDs = append(scheduleIDs, id) + } + sc.Close() + + uo := NewTable(ctx, tx, "user_overrides", + []string{ + "id", + "tgt_schedule_id", + "add_user_id", + "remove_user_id", + "start_time", + "end_time", + }, + ) + for _, schedID := range scheduleIDs { + n := rand.Intn(ScheduleMaxOverrides) + u := make(map[string]bool, len(userIDs)) + nextUser := func() string { + for { + id := pickOneStr(userIDs) + if u[id] { + continue + } + u[id] = true + return id + } + } + for i := 0; 
i < n; i++ { + var add, rem sql.NullString + if gofakeit.Bool() { + add.Valid = true + add.String = nextUser() + } + if !add.Valid || gofakeit.Bool() { + rem.Valid = true + rem.String = nextUser() + } + end := gofakeit.DateRange(time.Now(), time.Now().AddDate(0, 1, 0)) + start := gofakeit.DateRange(time.Now().AddDate(0, -1, 0), end.Add(-time.Minute)) + uo.Insert( + gofakeit.UUID(), + schedID, + add, rem, + start, end, + ) + } + } + uo.Close() + + sr := NewTable(ctx, tx, "schedule_rules", + []string{ + "id", + "schedule_id", + "sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", + "start_time", "end_time", + "tgt_user_id", "tgt_rotation_id", + }) + for _, schedID := range scheduleIDs { + n := rand.Intn(ScheduleMaxRules) + for i := 0; i < n; i++ { + var usr, rot sql.NullString + if gofakeit.Bool() { + usr.Valid = true + usr.String = pickOneStr(userIDs) + } else { + rot.Valid = true + rot.String = pickOneStr(rotationIDs) + } + sr.Insert( + gofakeit.UUID(), + schedID, + gofakeit.Bool(), gofakeit.Bool(), gofakeit.Bool(), gofakeit.Bool(), gofakeit.Bool(), gofakeit.Bool(), gofakeit.Bool(), + gofakeit.Date(), gofakeit.Date(), + usr, rot, + ) + } + } + sr.Close() + + var epIDs []string + ep := NewTable(ctx, tx, "escalation_policies", []string{"id", "name", "description", "repeat"}) + epGen := newGen() + for i := 0; i < EPCount; i++ { + id := gofakeit.UUID() + ep.Insert(id, epGen.Gen(idName("Policy")), gofakeit.Sentence(rand.Intn(10)+3), rand.Intn(3)) + epIDs = append(epIDs, id) + } + ep.Close() + + var epStepIDs []string + eps := NewTable(ctx, tx, "escalation_policy_steps", []string{"id", "escalation_policy_id", "step_number", "delay"}) + for _, epID := range epIDs { + n := rand.Intn(EPMaxStep) + for i := 0; i < n; i++ { + id := gofakeit.UUID() + eps.Insert( + id, + epID, + i, + rand.Intn(25)+5, + ) + epStepIDs = append(epStepIDs, id) + } + } + eps.Close() + + epAct := NewTable(ctx, tx, "escalation_policy_actions", []string{"id", 
"escalation_policy_step_id", "user_id", "schedule_id", "rotation_id"}) + for _, epStepID := range epStepIDs { + epActGen := newGen() + n := rand.Intn(EPMaxAssigned) + for i := 0; i < n; i++ { + var usr, sched, rot sql.NullString + switch rand.Intn(3) { + case 0: + usr.Valid = true + usr.String = epActGen.PickOne(userIDs) + case 1: + sched.Valid = true + sched.String = epActGen.PickOne(scheduleIDs) + case 2: + rot.Valid = true + rot.String = epActGen.PickOne(rotationIDs) + } + epAct.Insert( + gofakeit.UUID(), + epStepID, + usr, sched, rot, + ) + } + } + epAct.Close() + + var serviceIDs []string + svcGen := newGen() + svc := NewTable(ctx, tx, "services", []string{"id", "name", "description", "escalation_policy_id"}) + for i := 0; i < SvcCount; i++ { + id := gofakeit.UUID() + svc.Insert( + id, + svcGen.Gen(idName("Service")), + gofakeit.Sentence(rand.Intn(10)+3), + pickOneStr(epIDs), + ) + serviceIDs = append(serviceIDs, id) + } + svc.Close() + + iKey := NewTable(ctx, tx, "integration_keys", []string{"id", "name", "type", "service_id"}) + for _, serviceID := range serviceIDs { + genIKey := newGen() + n := rand.Intn(IntegrationKeyMax) + for i := 0; i < n; i++ { + typ := integrationkey.TypeGrafana + if gofakeit.Bool() { + typ = integrationkey.TypeGeneric + } + iKey.Insert( + gofakeit.UUID(), + genIKey.Gen(idName("Key")), + typ, + serviceID, + ) + } + } + iKey.Close() + + aTbl := NewTable(ctx, tx, "alerts", []string{"summary", "details", "status", "service_id", "source", "dedup_key"}) + totalAlerts := AlertActiveCount + AlertClosedCount + for i := 0; i < totalAlerts; i++ { + a := alert.Alert{ + Summary: gofakeit.Sentence(rand.Intn(10) + 3), + Source: alert.SourceGrafana, + ServiceID: pickOneStr(serviceIDs), + Status: alert.StatusClosed, + } + + if gofakeit.Bool() { + a.Details = gofakeit.Sentence(rand.Intn(30) + 1) + } + if i < AlertActiveCount { + a.Status = alert.StatusActive + } + if gofakeit.Bool() { + a.Source = alert.SourceManual + } + var dedup *alert.DedupID + if 
a.Status != alert.StatusClosed { + dedup = a.DedupKey() + } + aTbl.Insert( + a.Summary, + a.Details, + a.Status, + a.ServiceID, + a.Source, + dedup, + ) + } + aTbl.Close() + + noErr(ctx, tx.Commit()) + fmt.Printf("Finished %d records across %d tables in %s\n", genRecords, genTables, time.Since(start).String()) + + return nil +} + +// openDB will open dbconfig.yml to detect the datasource, and attempt to open a DB connection. +func openDB() (*sql.DB, error) { + return sql.Open("postgres", "user=goalert dbname=goalert sslmode=disable") +} + +func recreateDB() error { + db, err := sql.Open("postgres", "user=goalert dbname=postgres sslmode=disable") + if err != nil { + return err + } + defer db.Close() + + _, err = db.Exec("drop database goalert") + if err != nil { + return err + } + _, err = db.Exec("create database goalert") + return err +} + +func resetDB(db *sql.DB) error { + var err error + if flag.Arg(0) != "" { + _, err = migrate.Up(context.Background(), db, flag.Arg(0)) + } else { + _, err = migrate.ApplyAll(context.Background(), db) + } + return err +} + +func doMigrations(skipMigrate *bool) error { + err := recreateDB() + if err != nil { + return errors.Wrap(err, "recreate DB") + } + + db, err := openDB() + if err != nil { + return errors.Wrap(err, "open DB") + } + defer db.Close() + + if *skipMigrate { + return nil + } + + err = resetDB(db) + if err != nil { + return errors.Wrap(err, "perform migration after resettting") + } + return nil +} diff --git a/devtools/runjson/ci-cypress.json b/devtools/runjson/ci-cypress.json new file mode 100644 index 0000000000..26efcd9a73 --- /dev/null +++ b/devtools/runjson/ci-cypress.json @@ -0,0 +1,61 @@ +[ + { + "Name": "Backend", + "Before": { + "Name": "Prep", + "Dir": "web/src", + "Command": ["./scripts/smoketest-prep.sh"], + "Env": [ + "DB_URL=postgres://postgres@localhost:5432/postgres?sslmode=disable", + "GOALERT=../../bin/goalert", + "BASE_URL=http://localhost:3040" + ] + }, + "Command": [ + "bin/goalert", + 
"-l=localhost:3042", + "--db-url=postgres://postgres@localhost:5432/postgres?sslmode=disable", + "--log-requests=false", + "--slack-base-url=http://localhost:3040/slack" + ] + }, + { + "Name": "Slack", + "Command": [ + "bin/mockslack", + "-client-id=555449060693.555449060694", + "-client-secret=52fdfc072182654f163f5f0f9a621d72", + "-access-token=xoxp-555449060693-555449060694-587071460694-9566c74d10037c4d7bbb0407d1e2c649", + "-prefix=/slack", + "-single-user=bob", + "-addr=localhost:3046" + ] + }, + { + "Name": "Proxy", + "Command": [ + "bin/simpleproxy", + "-addr=localhost:3040", + "/slack/=http://localhost:3046", + "http://localhost:3042" + ] + }, + { + "Name": "Cypress", + "Before": { + "Name": "BE Wait", + "Command": ["bin/waitfor", "http://localhost:3042"] + }, + "Command": [ + "./node_modules/.bin/cypress", + "run", + "--config", + "baseUrl=http://localhost:3040" + ], + "Env": [ + "CYPRESS_DB_URL=postgres://postgres@localhost:5432/postgres?sslmode=disable" + ], + "Dir": "web/src", + "ExitAfter": true + } +] diff --git a/devtools/runjson/localdev-cypress-prod.json b/devtools/runjson/localdev-cypress-prod.json new file mode 100644 index 0000000000..7f29fe3e11 --- /dev/null +++ b/devtools/runjson/localdev-cypress-prod.json @@ -0,0 +1,101 @@ +[ + { + "Name": "Build-BE", + "Command": ["make", "-s", "bin/goalert", "BUNDLE=1"], + "Restart": true, + "Quiet": true, + "IgnoreErrors": true + }, + { + "Name": "Backend", + "Command": [ + "bin/goalert", + "-l=localhost:3042", + "--db-url=postgres://postgres@localhost:5433/postgres?sslmode=disable", + "--slack-base-url=http://localhost:3040/slack" + ], + "Dir": "web/src", + "Restart": true, + "IgnoreErrors": true, + "Watch": true, + "Before": { + "Name": "Prep", + "Before": { + "Name": "PG Wait", + "Command": [ + "bin/waitfor", + "postgres://postgres@localhost:5433/postgres?sslmode=disable" + ] + }, + "Dir": "web/src", + "Command": ["./scripts/smoketest-prep.sh"], + "Env": [ + 
"DB_URL=postgres://postgres@localhost:5433/postgres?sslmode=disable", + "GOALERT=../../bin/goalert", + "BASE_URL=http://localhost:3040" + ] + } + }, + { + "Name": "Slack", + "Restart": true, + "Watch": true, + "Command": [ + "bin/mockslack", + "-client-id=555449060693.555449060694", + "-client-secret=52fdfc072182654f163f5f0f9a621d72", + "-access-token=xoxp-555449060693-555449060694-587071460694-9566c74d10037c4d7bbb0407d1e2c649", + "-prefix=/slack", + "-single-user=bob", + "-addr=localhost:3046" + ] + }, + { + "Name": "Proxy", + "Command": [ + "bin/simpleproxy", + "-addr=localhost:3040", + "/slack/=http://localhost:3046", + "http://localhost:3042" + ] + }, + { + "Name": "Postgres", + "Before": { + "Name": "PG Cleanup", + "Command": ["docker", "rm", "-f", "smoketest-postgres"], + "IgnoreErrors": true + }, + "After": { + "Name": "PG Cleanup", + "Command": ["docker", "rm", "-f", "smoketest-postgres"], + "IgnoreErrors": true + }, + "Command": [ + "docker", + "run", + "--rm", + "--name=smoketest-postgres", + "-p=5433:5432", + "postgres:11-alpine" + ] + }, + { + "Name": "Cypress", + "Before": { + "Name": "BE Wait", + "Command": ["bin/waitfor", "http://localhost:3042"] + }, + "Command": [ + "./node_modules/.bin/cypress", + "open", + "--config", + "baseUrl=http://localhost:3040" + ], + "Env": [ + "CYPRESS_DB_URL=postgres://postgres@localhost:5433/postgres?sslmode=disable" + ], + "Dir": "web/src", + "ExitAfter": true + } +] diff --git a/devtools/runjson/localdev-cypress.json b/devtools/runjson/localdev-cypress.json new file mode 100644 index 0000000000..a9ef002aa6 --- /dev/null +++ b/devtools/runjson/localdev-cypress.json @@ -0,0 +1,116 @@ +[ + { + "Name": "Build-BE", + "Command": ["make", "-s", "bin/goalert"], + "Restart": true, + "Quiet": true, + "IgnoreErrors": true + }, + { + "Name": "Backend", + "Command": [ + "bin/goalert", + "-l=localhost:3042", + "--ui-url=http://localhost:3045", + "--db-url=postgres://postgres@localhost:5433/postgres?sslmode=disable", + 
"--slack-base-url=http://localhost:3040/slack" + ], + "Dir": "web/src", + "Restart": true, + "IgnoreErrors": true, + "Watch": true, + "Before": { + "Name": "Prep", + "Before": { + "Name": "PG Wait", + "Command": [ + "bin/waitfor", + "postgres://postgres@localhost:5433/postgres?sslmode=disable" + ] + }, + "Dir": "web/src", + "Command": ["./scripts/smoketest-prep.sh"], + "Env": [ + "DB_URL=postgres://postgres@localhost:5433/postgres?sslmode=disable", + "GOALERT=../../bin/goalert", + "BASE_URL=http://localhost:3040" + ] + } + }, + { + "Name": "UI", + "Command": [ + "./node_modules/.bin/webpack-dev-server", + "--inline", + "--devtool=cheap-module-source-map", + "--allowed-hosts=docker.for.mac.host.internal", + "--port=3045", + "--progress=false", + "--mode=development" + ], + "Dir": "web/src" + }, + { + "Name": "Slack", + "Restart": true, + "Watch": true, + "Command": [ + "bin/mockslack", + "-client-id=555449060693.555449060694", + "-client-secret=52fdfc072182654f163f5f0f9a621d72", + "-access-token=xoxp-555449060693-555449060694-587071460694-9566c74d10037c4d7bbb0407d1e2c649", + "-prefix=/slack", + "-single-user=bob", + "-addr=localhost:3046" + ] + }, + { + "Name": "Proxy", + "Command": [ + "bin/simpleproxy", + "-addr=localhost:3040", + "/slack/=http://localhost:3046", + "http://localhost:3042" + ] + }, + { + "Name": "Postgres", + "Before": { + "Name": "PG Cleanup", + "Command": ["docker", "rm", "-f", "smoketest-postgres"], + "IgnoreErrors": true + }, + "After": { + "Name": "PG Cleanup", + "Command": ["docker", "rm", "-f", "smoketest-postgres"], + "IgnoreErrors": true + }, + "Command": [ + "docker", + "run", + "--rm", + "--name=smoketest-postgres", + "-p=5433:5432", + "postgres:11-alpine" + ] + }, + + { + "Name": "Cypress", + "Before": { + "Name": "BE Wait", + "Command": ["bin/waitfor", "http://localhost:3042"] + }, + "Command": [ + "./node_modules/.bin/cypress", + "open", + "--config", + "baseUrl=http://localhost:3040" + ], + "Env": [ + 
"CYPRESS_DB_URL=postgres://postgres@localhost:5433/postgres?sslmode=disable" + ], + "Dir": "web/src", + "ExitAfter": true + } +] diff --git a/devtools/runjson/localdev.json b/devtools/runjson/localdev.json new file mode 100644 index 0000000000..10586d9c49 --- /dev/null +++ b/devtools/runjson/localdev.json @@ -0,0 +1,65 @@ +[ + { + "Name": "Build-BE", + "Command": ["make", "-s", "bin/goalert", "BUILD_TAGS=sql_highlight"], + "Restart": true, + "Quiet": true, + "IgnoreErrors": true + }, + { + "Name": "Backend", + "Before": { + "Name": "Wait JE", + "Command": ["bin/waitfor", "http://localhost:16686"] + }, + "Command": [ + "bin/goalert", + "-l=localhost:3030", + "--ui-url=http://localhost:3035", + "--jaeger-endpoint=http://localhost:14268", + "--db-url=postgres://goalert@localhost:5432/goalert?sslmode=disable" + ], + "Restart": true, + "IgnoreErrors": true, + "Watch": true + }, + { + "Name": "UI", + "Command": [ + "./node_modules/.bin/webpack-dev-server", + "--inline", + "--devtool=cheap-module-source-map", + "--allowed-hosts=docker.for.mac.host.internal", + "--port=3035", + "--progress=false", + "--mode=development" + ], + "Dir": "web/src" + }, + { + "Name": "Jaeger", + "Command": [ + "docker", + "run", + "--rm", + "-p=6831:6831/udp", + "-p=6832:6832/udp", + "-p=5778:5778", + "-p=16686:16686", + "-p=14268:14268", + "-p=9411:9411", + "--name=jaeger", + "jaegertracing/all-in-one" + ], + "Before": { + "Name": "JE Cleanup", + "Command": ["docker", "rm", "-f", "jaeger"], + "IgnoreErrors": true + }, + "After": { + "Name": "JE Cleanup", + "Command": ["docker", "rm", "-f", "jaeger"], + "IgnoreErrors": true + } + } +] diff --git a/devtools/runjson/main.go b/devtools/runjson/main.go new file mode 100644 index 0000000000..1e23053c27 --- /dev/null +++ b/devtools/runjson/main.go @@ -0,0 +1,42 @@ +package main + +import ( + "context" + "encoding/json" + "flag" + "log" + "os" + "os/signal" +) + +var logDir string + +func main() { + flag.StringVar(&logDir, "logs", "", "Directory to 
store copies of all logs. Overwritten on each start.") + flag.Parse() + log.SetFlags(log.Lshortfile) + + var tasks []Task + dec := json.NewDecoder(os.Stdin) + dec.DisallowUnknownFields() + err := dec.Decode(&tasks) + if err != nil { + log.Fatal("decode input:", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := make(chan os.Signal) + signal.Notify(ch, os.Interrupt) + go func() { + <-ch + log.Println("Got signal, terminating.") + cancel() + }() + + err = Run(ctx, tasks) + if err != nil { + log.Fatal("run:", err) + } +} diff --git a/devtools/runjson/run.go b/devtools/runjson/run.go new file mode 100644 index 0000000000..3014cb42b6 --- /dev/null +++ b/devtools/runjson/run.go @@ -0,0 +1,96 @@ +package main + +import ( + "bufio" + "context" + "errors" + "io" + "log" + "os" + "sync" + + "github.com/fatih/color" + "github.com/mattn/go-colorable" +) + +var colors = []color.Attribute{ + color.FgRed, + color.FgGreen, + color.FgYellow, + color.FgBlue, + color.FgMagenta, + color.FgCyan, + color.FgHiRed, + color.FgHiGreen, + color.FgHiYellow, + color.FgHiBlue, + color.FgHiMagenta, + color.FgHiCyan, +} + +var mx sync.Mutex + +func newWritePrefixer(attr color.Attribute, prefix string, out io.Writer) io.Writer { + r, w := io.Pipe() + + s := bufio.NewScanner(r) + pref := color.New(attr, color.Bold) + txt := color.New(attr) + + go func() { + for s.Scan() { + mx.Lock() + pref.Fprint(out, prefix) + txt.Fprintln(out, s.Text()) + mx.Unlock() + } + r.CloseWithError(s.Err()) + }() + + return w +} + +func Run(ctx context.Context, tasks []Task) error { + l := 0 + for _, t := range tasks { + if len(t.Name) > l { + l = len(t.Name) + } + } + ctx, cancel := context.WithCancel(ctx) + defer cancel() + var alwaysPass bool + + w := colorable.NewColorable(os.Stdout) + + ch := make(chan error, len(tasks)) + for i, t := range tasks { + go func(i int, t Task) { + attr := colors[i%len(colors)] + err := t.run(ctx, l, attr, w) + ch <- err + if t.ExitAfter 
{ + alwaysPass = err == nil + cancel() + } + }(i, t) + } + + var hasError bool + for range tasks { + err := <-ch + if err != nil { + cancel() + if err != context.Canceled { + hasError = true + log.Println("ERROR:", err) + } + } + } + + if hasError && !alwaysPass { + return errors.New("one or more tasks failed") + } + + return nil +} diff --git a/devtools/runjson/task.go b/devtools/runjson/task.go new file mode 100644 index 0000000000..a1f5eae8cd --- /dev/null +++ b/devtools/runjson/task.go @@ -0,0 +1,168 @@ +package main + +import ( + "context" + "crypto/sha1" + "encoding/hex" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/fatih/color" + "github.com/pkg/errors" +) + +// A Task is an independant unit of work +type Task struct { + // Name is used to identify the task in the case of errors, as well as the prefix for logs. + Name string + + // Dir is the working directory. If empty, the current working directory is used. + Dir string + + // Quiet will omit starting messages. + Quiet bool + + // Before is a task that must complete before the current one starts. + Before *Task + + // After is a task that will run after the current one exits for any reason. + After *Task + + // Command contains the binary to run, followed by any/all arguments. + Command []string + + // Env parameters will be set in addition to any current ones. + Env []string + + // Restart will cause the process to restart automatically if it terminates. + Restart bool + + // IgnoreErrors will allow all processes to continue, even if a non-zero exit status is returned. + IgnoreErrors bool + + // Watch will cause the process to restart if/when the binary changes. + Watch bool + + // ExitAfter, if true, will cause all tasks to be terminated when this one finishes. 
+ ExitAfter bool +} + +func hashFile(path string) string { + fd, err := os.Open(path) + if err != nil { + return "" + } + defer fd.Close() + + h := sha1.New() + io.Copy(h, fd) + return hex.EncodeToString(h.Sum(nil)) +} + +func (t *Task) run(ctx context.Context, pad int, attr color.Attribute, w io.Writer) error { + c := color.New(attr) + cb := color.New(attr, color.Bold) + + pref := fmt.Sprintf("\t%-"+strconv.Itoa(pad)+"s", t.Name) + + stdout := newWritePrefixer(attr, pref+" (out): ", w) + stderr := newWritePrefixer(attr, pref+" (err): ", w) + if logDir != "" { + os.MkdirAll(logDir, 0755) + outFile, err := os.Create(filepath.Join(logDir, fmt.Sprintf("%s.out.log", t.Name))) + if err != nil { + return errors.Wrap(err, "create stdout log") + } + defer outFile.Close() + errFile, err := os.Create(filepath.Join(logDir, fmt.Sprintf("%s.err.log", t.Name))) + if err != nil { + return errors.Wrap(err, "create stderr log") + } + defer errFile.Close() + stdout = io.MultiWriter(stdout, outFile) + stderr = io.MultiWriter(stderr, errFile) + } + + defer log.Println(color.New(color.BgRed).Sprint(" QUIT "), cb.Sprint(t.Name)) + rawBin := t.Command[0] + if t.Dir != "" && strings.HasPrefix(rawBin, ".") { + rawBin = filepath.Join(t.Dir, rawBin) + } + bin, err := exec.LookPath(rawBin) + if err != nil { + return errors.Wrapf(err, "lookup %s", rawBin) + } + bin, err = filepath.Abs(bin) + if err != nil { + return errors.Wrapf(err, "lookup %s", rawBin) + } + + if t.Before != nil { + err = t.Before.run(ctx, pad, attr|color.Faint, w) + if err != nil { + return errors.Wrap(err, "before") + } + } + if t.After != nil { + defer t.After.run(context.Background(), pad, attr|color.Faint, w) + } + + tick := time.NewTicker(time.Second) + defer tick.Stop() + + for { + procCtx, cancel := context.WithCancel(ctx) + hash := hashFile(bin) + if t.Watch { + go func() { + defer cancel() + t := time.NewTicker(time.Second) + for { + select { + case <-procCtx.Done(): + return + case <-t.C: + } + newHash := 
hashFile(bin) + if newHash == hash { + continue + } + return + } + }() + } + + cmd := exec.CommandContext(procCtx, bin, t.Command[1:]...) + cmd.Dir = t.Dir + cmd.Stdout = stdout + cmd.Stderr = stderr + cmd.Env = append(os.Environ(), t.Env...) + + if !t.Quiet { + log.Println(c.Sprint("Starting"), cb.Sprintf("%s[%s]", t.Name, hash), c.Sprint(bin+" "+strings.Join(t.Command[1:], " "))) + } + + err := cmd.Run() + cancel() + if err != nil && !t.IgnoreErrors { + return errors.Wrapf(err, "run %s", t.Name) + } + if !t.Restart { + break + } + select { + case <-tick.C: + case <-ctx.Done(): + return ctx.Err() + } + } + + return nil +} diff --git a/devtools/simpleproxy/main.go b/devtools/simpleproxy/main.go new file mode 100644 index 0000000000..fa7c109a16 --- /dev/null +++ b/devtools/simpleproxy/main.go @@ -0,0 +1,39 @@ +package main + +import ( + "flag" + "log" + "net/http" + "net/http/httputil" + "net/url" + "strings" +) + +func main() { + addr := flag.String("addr", ":3040", "Address to listen for HTTP traffic.") + flag.Parse() + + log.SetFlags(log.Lshortfile) + mux := http.NewServeMux() + for _, route := range flag.Args() { + parts := strings.SplitN(route, "=", 2) + if len(parts) == 1 { + parts = []string{"/", parts[0]} + } + + u, err := url.Parse(parts[1]) + if err != nil { + log.Fatalf("ERORR: parse %s: %v", parts[1], err) + } + + p := httputil.NewSingleHostReverseProxy(u) + mux.Handle(parts[0], p) + log.Printf("Registered: %s -> %s", parts[0], parts[1]) + } + + log.Println("Listening:", *addr) + err := http.ListenAndServe(*addr, mux) + if err != nil { + log.Println("ERROR: serve:", err) + } +} diff --git a/devtools/tools.go b/devtools/tools.go new file mode 100644 index 0000000000..a6d5c9490c --- /dev/null +++ b/devtools/tools.go @@ -0,0 +1,7 @@ +// +build tools + +package devtools + +import ( + _ "github.com/gordonklaus/ineffassign" +) diff --git a/devtools/waitfor/main.go b/devtools/waitfor/main.go new file mode 100644 index 0000000000..c68673d00a --- /dev/null +++ 
b/devtools/waitfor/main.go @@ -0,0 +1,69 @@ +package main + +import ( + "context" + "database/sql" + "flag" + "log" + "net/http" + "strings" + "time" + + _ "github.com/lib/pq" +) + +func waitForHTTP(ctx context.Context, url string) { + t := time.NewTicker(3 * time.Second) + defer t.Stop() + for { + _, err := http.Get(url) + if err == nil { + return + } + + log.Println("Waiting for", url, err) + select { + case <-ctx.Done(): + log.Fatal("Timeout waiting for", url) + case <-t.C: + } + } +} +func waitForPostgres(ctx context.Context, connStr string) { + t := time.NewTicker(3 * time.Second) + defer t.Stop() + db, err := sql.Open("postgres", connStr) + if err != nil { + log.Fatal("db open:", err) + } + defer db.Close() + for { + err = db.PingContext(ctx) + if err == nil { + return + } + + log.Println("Waiting for", connStr, err) + select { + case <-ctx.Done(): + log.Fatal("Timeout waiting for", connStr) + case <-t.C: + } + } +} + +func main() { + timeout := flag.Duration("timeout", time.Minute, "Timeout to wait for all checks to complete.") + flag.Parse() + + ctx, cancel := context.WithTimeout(context.Background(), *timeout) + defer cancel() + + for _, u := range flag.Args() { + if strings.HasPrefix(u, "postgres://") { + waitForPostgres(ctx, u) + } else { + waitForHTTP(ctx, u) + } + } +} diff --git a/engine/backend.go b/engine/backend.go new file mode 100644 index 0000000000..0dacd48728 --- /dev/null +++ b/engine/backend.go @@ -0,0 +1,49 @@ +package engine + +import ( + "context" + "database/sql" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + + uuid "github.com/satori/go.uuid" +) + +type backend struct { + db *sql.DB + + findOne *sql.Stmt + + clientID string +} + +func newBackend(db *sql.DB) (*backend, error) { + p := &util.Prepare{DB: db} + + return &backend{ + db: db, + clientID: uuid.NewV4().String(), + + findOne: p.P(` + SELECT + id, + alert_id, + contact_method_id + FROM outgoing_messages + WHERE id = $1 + `), + }, 
p.Err +} + +func (b *backend) FindOne(ctx context.Context, id string) (*callback, error) { + err := validate.UUID("CallbackID", id) + if err != nil { + return nil, err + } + var c callback + err = b.findOne.QueryRowContext(ctx, id).Scan(c.fields()...) + if err != nil { + return nil, err + } + return &c, nil +} diff --git a/engine/callback.go b/engine/callback.go new file mode 100644 index 0000000000..12eb5a8c2d --- /dev/null +++ b/engine/callback.go @@ -0,0 +1,35 @@ +package engine + +import ( + "github.com/target/goalert/validation/validate" + + uuid "github.com/satori/go.uuid" +) + +type callback struct { + ID string + AlertID int + ContactMethodID string +} + +func (c callback) Normalize() (*callback, error) { + if c.ID == "" { + c.ID = uuid.NewV4().String() + } + err := validate.Many( + validate.UUID("ID", c.ID), + validate.UUID("ContactMethodID", c.ContactMethodID), + ) + if err != nil { + return nil, err + } + return &c, nil +} + +func (c *callback) fields() []interface{} { + return []interface{}{ + &c.ID, + &c.AlertID, + &c.ContactMethodID, + } +} diff --git a/engine/cleanupmanager/db.go b/engine/cleanupmanager/db.go new file mode 100644 index 0000000000..5357c414c0 --- /dev/null +++ b/engine/cleanupmanager/db.go @@ -0,0 +1,56 @@ +package cleanupmanager + +import ( + "context" + "database/sql" + "github.com/target/goalert/engine/processinglock" + "github.com/target/goalert/keyring" + "github.com/target/goalert/util" +) + +// DB handles updating escalation policies. +type DB struct { + db *sql.DB + lock *processinglock.Lock + + keys keyring.Keys + + orphanSlackChan *sql.Stmt + deleteChan *sql.Stmt +} + +// Name returns the name of the module. +func (db *DB) Name() string { return "Engine.CleanupManager" } + +// NewDB creates a new DB. 
+func NewDB(ctx context.Context, db *sql.DB, keys keyring.Keys) (*DB, error) { + lock, err := processinglock.NewLock(ctx, db, processinglock.Config{ + Version: 1, + Type: processinglock.TypeCleanup, + }) + if err != nil { + return nil, err + } + + p := &util.Prepare{Ctx: ctx, DB: db} + + return &DB{ + db: db, + lock: lock, + + keys: keys, + + orphanSlackChan: p.P(` + select + id, meta->>'tok' + from notification_channels + where + type = 'SLACK' and + id not in (select channel_id from escalation_policy_actions where channel_id notnull) + order by created_at + limit 15 + for update skip locked + `), + deleteChan: p.P(`delete from notification_channels where id = any($1)`), + }, p.Err +} diff --git a/engine/cleanupmanager/update.go b/engine/cleanupmanager/update.go new file mode 100644 index 0000000000..1b1c20ddf1 --- /dev/null +++ b/engine/cleanupmanager/update.go @@ -0,0 +1,63 @@ +package cleanupmanager + +import ( + "context" + "github.com/target/goalert/permission" + "github.com/target/goalert/util/log" + "net/http" + "net/url" + + "github.com/lib/pq" +) + +// UpdateAll will update the state of all active escalation policies. +func (db *DB) UpdateAll(ctx context.Context) error { + err := db.update(ctx) + return err +} + +func (db *DB) update(ctx context.Context) error { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + log.Debugf(ctx, "Running cleanup operations.") + + tx, err := db.lock.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + rows, err := tx.StmtContext(ctx, db.orphanSlackChan).QueryContext(ctx) + if err != nil { + return err + } + defer rows.Close() + + var toDelete pq.StringArray + for rows.Next() { + var id, token string + err = rows.Scan(&id, &token) + if err != nil { + return err + } + log.Debugf(ctx, "cleanup notification channel %s", id) + data, _, err := db.keys.Decrypt([]byte(token)) + if err != nil { + return err + } + + // TODO: implement retry/backoff logic? 
+ go http.Get("https://slack.com/api/auth.revoke?token=" + url.QueryEscape(string(data))) + + toDelete = append(toDelete, id) + } + + _, err = tx.StmtContext(ctx, db.deleteChan).ExecContext(ctx, toDelete) + if err != nil { + return err + } + + return tx.Commit() +} diff --git a/engine/config.go b/engine/config.go new file mode 100644 index 0000000000..350013e7f4 --- /dev/null +++ b/engine/config.go @@ -0,0 +1,31 @@ +package engine + +import ( + "github.com/target/goalert/alert" + alertlog "github.com/target/goalert/alert/log" + "github.com/target/goalert/config" + "github.com/target/goalert/keyring" + "github.com/target/goalert/notification" + "github.com/target/goalert/notificationchannel" + "github.com/target/goalert/user" + "github.com/target/goalert/user/contactmethod" +) + +// Config contains parameters for controlling how the Engine operates. +type Config struct { + AlertlogStore alertlog.Store + AlertStore alert.Store + ContactMethodStore contactmethod.Store + NotificationSender notification.Sender + UserStore user.Store + NotificationStore notification.Store + NCStore notificationchannel.Store + + ConfigSource config.Source + + Keys keyring.Keys + + MaxMessages int + + DisableCycle bool +} diff --git a/engine/engine.go b/engine/engine.go new file mode 100644 index 0000000000..8052464cab --- /dev/null +++ b/engine/engine.go @@ -0,0 +1,471 @@ +package engine + +import ( + "context" + "database/sql" + "github.com/target/goalert/alert" + "github.com/target/goalert/app/lifecycle" + "github.com/target/goalert/engine/escalationmanager" + "github.com/target/goalert/engine/heartbeatmanager" + "github.com/target/goalert/engine/message" + "github.com/target/goalert/engine/npcyclemanager" + "github.com/target/goalert/engine/processinglock" + "github.com/target/goalert/engine/rotationmanager" + "github.com/target/goalert/engine/schedulemanager" + "github.com/target/goalert/engine/statusupdatemanager" + 
"github.com/target/goalert/engine/verifymanager" + "github.com/target/goalert/notification" + "github.com/target/goalert/permission" + "github.com/target/goalert/user" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/util/log" + "time" + + "github.com/lib/pq" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +var errDisabledCM = errors.New("contact method is disabled") + +type updater interface { + Name() string + UpdateAll(context.Context) error +} + +// Engine handles automatic escaltion of unacknowledged(triggered) alerts, as well as +// passing to-be-sent notifications to the notification.Sender. +// +// Care is taken to ensure only one attempt is made per contact-method +// at a time, regardless of how many instances of the application may be running. +type Engine struct { + b *backend + mgr *lifecycle.Manager + + shutdownCh chan struct{} + triggerCh chan struct{} + runLoopExit chan struct{} + + nextCycle chan chan struct{} + + modules []updater + msg *message.DB + + am alert.Manager + cfg *Config + + triggerPauseCh chan *pauseReq +} + +type pauseReq struct { + ch chan error + ctx context.Context +} + +// NewEngine will create a new Engine using the passed *sql.DB as a backend. Outgoing +// notifications will be passed to Sender. +// +// Context is only used for preparing and initializing. 
+func NewEngine(ctx context.Context, db *sql.DB, c *Config) (*Engine, error) { + var err error + + p := &Engine{ + cfg: c, + shutdownCh: make(chan struct{}), + triggerCh: make(chan struct{}), + triggerPauseCh: make(chan *pauseReq), + runLoopExit: make(chan struct{}), + nextCycle: make(chan chan struct{}), + + am: c.AlertStore, + } + + p.mgr = lifecycle.NewManager(p._run, p._shutdown) + err = p.mgr.SetPauseResumer(lifecycle.PauseResumerFunc( + p._pause, + p._resume, + )) + if err != nil { + return nil, err + } + + rotMgr, err := rotationmanager.NewDB(ctx, db) + if err != nil { + return nil, errors.Wrap(err, "rotation management backend") + } + schedMgr, err := schedulemanager.NewDB(ctx, db) + if err != nil { + return nil, errors.Wrap(err, "schedule management backend") + } + epMgr, err := escalationmanager.NewDB(ctx, db, c.AlertlogStore) + if err != nil { + return nil, errors.Wrap(err, "alert escalation backend") + } + ncMgr, err := npcyclemanager.NewDB(ctx, db) + if err != nil { + return nil, errors.Wrap(err, "notification cycle backend") + } + statMgr, err := statusupdatemanager.NewDB(ctx, db) + if err != nil { + return nil, errors.Wrap(err, "status update backend") + } + verifyMgr, err := verifymanager.NewDB(ctx, db) + if err != nil { + return nil, errors.Wrap(err, "verification backend") + } + hbMgr, err := heartbeatmanager.NewDB(ctx, db, c.AlertStore) + if err != nil { + return nil, errors.Wrap(err, "heartbeat processing backend") + } + // cleanMgr, err := cleanupmanager.NewDB(ctx, db, c.CredKeyring) + // if err != nil { + // return nil, errors.Wrap(err, "cleanup backend") + // } + + p.modules = []updater{ + rotMgr, + schedMgr, + epMgr, + ncMgr, + statMgr, + verifyMgr, + hbMgr, + // cleanMgr, + } + + p.msg, err = message.NewDB(ctx, db, &message.Config{ + MaxMessagesPerCycle: c.MaxMessages, + RateLimit: map[notification.DestType]*message.RateConfig{ + notification.DestTypeSMS: &message.RateConfig{PerSecond: 1, Batch: 5 * time.Second}, + 
notification.DestTypeVoice: &message.RateConfig{PerSecond: 1, Batch: 5 * time.Second}, + }, + Pausable: p.mgr, + }) + if err != nil { + return nil, errors.Wrap(err, "messaging backend") + } + + p.b, err = newBackend(db) + if err != nil { + return nil, errors.Wrap(err, "init backend") + } + + return p, nil +} + +// WaitNextCycle will return after the next engine cycle starts and then finishes. +func (p *Engine) WaitNextCycle(ctx context.Context) error { + select { + case ch := <-p.nextCycle: + select { + case <-ch: + return nil + case <-ctx.Done(): + return ctx.Err() + } + case <-ctx.Done(): + return ctx.Err() + } +} + +func (p *Engine) processModule(ctx context.Context, m updater) { + defer recoverPanic(ctx, m.Name()) + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + for { + err := m.UpdateAll(ctx) + if pErr, ok := errors.Cause(err).(*pq.Error); ctx.Err() == nil && ok && pErr.Code.Class() == "40" { + // Class `40` is a transaction failure. + // In that case we will retry, so long + // as the context deadline has not been reached. 
+ // + // https://www.postgresql.org/docs/9.6/static/errcodes-appendix.html + continue + } + if err != nil && errors.Cause(err) != processinglock.ErrNoLock { + log.Log(ctx, errors.Wrap(err, m.Name())) + } + break + } +} + +func (p *Engine) processMessages(ctx context.Context) { + ctx, sp := trace.StartSpan(ctx, "Engine.MessageManager") + defer sp.End() + defer recoverPanic(ctx, "MessageManager") + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + + err := p.msg.SendMessages(ctx, func(ctx context.Context, m *message.Message) (*notification.MessageStatus, error) { + switch m.Type { + case message.TypeAlertNotification: + return p.sendNotification(ctx, m.ID, m.AlertID, m.DestType, m.DestID) + case message.TypeAlertStatusUpdate: + return p.sendStatusUpdate(ctx, m.ID, m.AlertLogID, m.DestType, m.DestID) + case message.TypeTestNotification: + return p.sendTestNotification(ctx, m.ID, m.DestType, m.DestID) + case message.TypeVerificationMessage: + return p.sendVerificationMessage(ctx, m.ID, m.DestType, m.DestID, m.VerifyID) + } + + log.Log(ctx, errors.New("SEND NOT IMPLEMENTED FOR MESSAGE TYPE")) + return ¬ification.MessageStatus{State: notification.MessageStateFailedPerm}, nil + }, p.cfg.NotificationSender.Status) + if errors.Cause(err) == processinglock.ErrNoLock { + return + } + if errors.Cause(err) == message.ErrAbort { + return + } + if err != nil { + log.Log(ctx, errors.Wrap(err, "send outgoing messages")) + } +} + +func recoverPanic(ctx context.Context, name string) { + err := recover() + if err == nil { + return + } + + if e, ok := err.(error); ok { + log.Log(ctx, errors.Wrapf(e, "PANIC in %s", name)) + } else { + log.Log(ctx, errors.Errorf("PANIC in %s: %+v", name, err)) + } +} + +// Trigger will force notifications to be processed immediately. +func (p *Engine) Trigger() { + <-p.triggerCh +} + +// Pause will attempt to gracefully stop engine processing. 
+func (p *Engine) Pause(ctx context.Context) error { + ctx, sp := trace.StartSpan(ctx, "Engine.Pause") + defer sp.End() + + return p.mgr.Pause(ctx) +} +func (p *Engine) _pause(ctx context.Context) error { + ch := make(chan error, 1) + + select { + case <-p.shutdownCh: + return errors.New("shutting down") + case <-ctx.Done(): + return ctx.Err() + case p.triggerPauseCh <- &pauseReq{ch: ch, ctx: ctx}: + select { + case <-ctx.Done(): + defer p.Resume(ctx) + return ctx.Err() + case err := <-ch: + return err + } + } +} + +// Resume will allow the engine to resume processing. +func (p *Engine) Resume(ctx context.Context) error { + return p.mgr.Resume(ctx) +} +func (p *Engine) _resume(ctx context.Context) error { + // nothing to be done `p.mgr.IsPaused` will already + // return false + return nil +} + +// Run will being the engine loop. +func (p *Engine) Run(ctx context.Context) error { + return p.mgr.Run(ctx) +} + +// Shutdown will gracefully shutdown the processor, finishing any ongoing tasks. +func (p *Engine) Shutdown(ctx context.Context) error { + if p == nil { + return nil + } + ctx, sp := trace.StartSpan(ctx, "Engine.Shutdown") + defer sp.End() + + return p.mgr.Shutdown(ctx) +} +func (p *Engine) _shutdown(ctx context.Context) error { + close(p.shutdownCh) + <-p.runLoopExit + return nil +} + +// UpdateStatus will update the status of a message. +func (p *Engine) UpdateStatus(ctx context.Context, status *notification.MessageStatus) error { + var err error + permission.SudoContext(ctx, func(ctx context.Context) { + err = p.msg.UpdateMessageStatus(ctx, status) + }) + return err +} + +// Receive will process a notification result. 
+func (p *Engine) Receive(ctx context.Context, callbackID string, result notification.Result) error { + ctx, sp := trace.StartSpan(ctx, "Engine.Receive") + defer sp.End() + cb, err := p.b.FindOne(ctx, callbackID) + if err != nil { + return err + } + + var usr *user.User + permission.SudoContext(ctx, func(ctx context.Context) { + cm, serr := p.cfg.ContactMethodStore.FindOne(ctx, cb.ContactMethodID) + if serr != nil { + err = errors.Wrap(serr, "lookup contact method") + return + } + usr, serr = p.cfg.UserStore.FindOne(ctx, cm.UserID) + if serr != nil { + err = errors.Wrap(serr, "lookup user") + } + }) + if err != nil { + return err + } + ctx = permission.UserSourceContext(ctx, usr.ID, usr.Role, &permission.SourceInfo{ + Type: permission.SourceTypeNotificationCallback, + ID: callbackID, + }) + + switch result { + case notification.ResultAcknowledge: + return p.am.UpdateStatus(ctx, cb.AlertID, alert.StatusActive) + case notification.ResultResolve: + return p.am.UpdateStatus(ctx, cb.AlertID, alert.StatusClosed) + } + + return errors.New("unknown result") +} + +// Stop will disable all associated contact methods associated with `value` of type `t`. This is should +// be invoked if a user, for example, responds with `STOP` via SMS. 
+func (p *Engine) Stop(ctx context.Context, d notification.Dest) error { + if !d.Type.IsUserCM() { + return errors.New("stop only supported on user contact methods") + } + var err error + + permission.SudoContext(ctx, func(ctx context.Context) { + err = p.cfg.ContactMethodStore.DisableByValue(ctx, contactmethod.TypeFromDestType(d.Type), d.Value) + }) + + return err +} + +func (p *Engine) processAll(ctx context.Context) bool { + for _, m := range p.modules { + if p.mgr.IsPausing() { + return true + } + ctx, sp := trace.StartSpan(ctx, m.Name()) + p.processModule(ctx, m) + sp.End() + } + return false +} +func (p *Engine) cycle(ctx context.Context) { + ctx, sp := trace.StartSpan(ctx, "Engine.Cycle") + defer sp.End() + + ctx = p.cfg.ConfigSource.Config().Context(ctx) + + ch := make(chan struct{}) + defer close(ch) +passSignals: + for { + select { + case p.nextCycle <- ch: + default: + break passSignals + } + } + + if p.mgr.IsPausing() { + log.Logf(ctx, "Engine cycle disabled (paused or shutting down).") + sp.AddAttributes(trace.BoolAttribute("cycle.skip", true)) + return + } + + log.Logf(ctx, "Engine cycle start.") + defer log.Logf(ctx, "Engine cycle end.") + + aborted := p.processAll(ctx) + if aborted || p.mgr.IsPausing() { + sp.Annotate([]trace.Attribute{trace.BoolAttribute("cycle.abort", true)}, "Cycle aborted.") + log.Logf(ctx, "Engine cycle aborted (paused or shutting down).") + return + } + p.processMessages(ctx) +} +func (p *Engine) handlePause(ctx context.Context, respCh chan error) { + // nothing special to do currently + respCh <- nil +} + +func (p *Engine) _run(ctx context.Context) error { + defer close(p.runLoopExit) + ctx = permission.SystemContext(ctx, "Engine") + if p.cfg.DisableCycle { + log.Logf(ctx, "Engine started in API-only mode.") + for { + select { + case req := <-p.triggerPauseCh: + req.ch <- nil + case <-ctx.Done(): + return ctx.Err() + case <-p.shutdownCh: + return nil + case p.triggerCh <- struct{}{}: + log.Logf(ctx, "Ignoring engine trigger 
(API-only mode).") + } + } + } + + alertTicker := time.NewTicker(5 * time.Second) + defer alertTicker.Stop() + + defer close(p.triggerCh) + + p.cycle(ctx) + + for { + // give priority to pending shutdown signals + // otherwise if the processing loop takes longer than + // 5 seconds, it may never shut down. + select { + case req := <-p.triggerPauseCh: + p.handlePause(req.ctx, req.ch) + case <-ctx.Done(): + // run context canceled or something + return ctx.Err() + case <-p.shutdownCh: + // shutdown requested + return nil + default: + } + + select { + case req := <-p.triggerPauseCh: + p.handlePause(req.ctx, req.ch) + case p.triggerCh <- struct{}{}: + p.cycle(log.WithField(ctx, "Trigger", "DIRECT")) + case <-alertTicker.C: + p.cycle(log.WithField(ctx, "Trigger", "INTERVAL")) + case <-ctx.Done(): + // context canceled or something + return ctx.Err() + case <-p.shutdownCh: + // shutdown requested + return nil + } + } +} diff --git a/engine/escalationmanager/db.go b/engine/escalationmanager/db.go new file mode 100644 index 0000000000..62fecf88ec --- /dev/null +++ b/engine/escalationmanager/db.go @@ -0,0 +1,262 @@ +package escalationmanager + +import ( + "context" + "database/sql" + alertlog "github.com/target/goalert/alert/log" + "github.com/target/goalert/engine/processinglock" + "github.com/target/goalert/util" +) + +// DB handles updating escalation policies. +type DB struct { + lock *processinglock.Lock + + cleanupNoSteps *sql.Stmt + + lockStmt *sql.Stmt + updateOnCall *sql.Stmt + + newPolicies *sql.Stmt + deletedSteps *sql.Stmt + normalEscalation *sql.Stmt + + log alertlog.Store +} + +// Name returns the name of the module. +func (db *DB) Name() string { return "Engine.EscalationManager" } + +// NewDB creates a new DB. 
+func NewDB(ctx context.Context, db *sql.DB, log alertlog.Store) (*DB, error) { + lock, err := processinglock.NewLock(ctx, db, processinglock.Config{ + Version: 3, + Type: processinglock.TypeEscalation, + }) + if err != nil { + return nil, err + } + + p := &util.Prepare{Ctx: ctx, DB: db} + + return &DB{ + log: log, + lock: lock, + + lockStmt: p.P(`lock escalation_policy_steps in share mode`), + + updateOnCall: p.P(` + with on_call as ( + select + step.id step_id, + coalesce(act.user_id, part.user_id, sched.user_id) user_id + from escalation_policy_steps step + join escalation_policy_actions act on act.escalation_policy_step_id = step.id + left join rotation_state rState on rState.rotation_id = act.rotation_id + left join rotation_participants part on part.id = rState.rotation_participant_id + left join schedule_on_call_users sched on sched.schedule_id = act.schedule_id and sched.end_time isnull + where coalesce(act.user_id, part.user_id, sched.user_id) notnull + ), ended as ( + select + ep_step_id step_id, + user_id + from ep_step_on_call_users + where end_time isnull + except + select step_id, user_id + from on_call + ), _end as ( + update ep_step_on_call_users ep + set end_time = now() + from ended + where + ep.ep_step_id = ended.step_id and + ep.user_id = ended.user_id and + ep.end_time isnull + ) + insert into ep_step_on_call_users (ep_step_id, user_id) + select step_id, user_id + from on_call + on conflict do nothing + returning ep_step_id, user_id + `), + + cleanupNoSteps: p.P(` + delete from escalation_policy_state state + using escalation_policies pol + where + state.escalation_policy_step_id isnull and + pol.id = state.escalation_policy_id and + pol.step_count = 0 + `), + + newPolicies: p.P(` + with to_escalate as ( + select alert_id, step.id ep_step_id, step.delay, step.escalation_policy_id, a.service_id + from escalation_policy_state state + join escalation_policy_steps step on + step.escalation_policy_id = state.escalation_policy_id and + 
step.step_number = 0 + join alerts a on a.id = state.alert_id and (a.status = 'triggered' or state.force_escalation) + where state.last_escalation isnull + for update skip locked + limit 1000 + ), _cycles as ( + insert into notification_policy_cycles (alert_id, user_id) + select esc.alert_id, on_call.user_id + from to_escalate esc + join ep_step_on_call_users on_call on + on_call.end_time isnull and + on_call.ep_step_id = esc.ep_step_id + ), _channels as ( + insert into outgoing_messages (message_type, alert_id, service_id, escalation_policy_id, channel_id) + select + cast('alert_notification' as enum_outgoing_messages_type), + esc.alert_id, + esc.service_id, + esc.escalation_policy_id, + act.channel_id + from to_escalate esc + join escalation_policy_actions act on + act.channel_id notnull and + act.escalation_policy_step_id = esc.ep_step_id + ) + update escalation_policy_state state + set + last_escalation = now(), + next_escalation = now() + (cast(esc.delay as text)||' minutes')::interval, + escalation_policy_step_id = esc.ep_step_id, + force_escalation = false + from + to_escalate esc + where + state.alert_id = esc.alert_id + returning state.alert_id + `), + + deletedSteps: p.P(` + with to_escalate as ( + select + alert_id, + step.id ep_step_id, + step.step_number, + step.delay, + state.escalation_policy_step_number >= ep.step_count repeated, + a.service_id, + step.escalation_policy_id + from escalation_policy_state state + join alerts a on a.id = state.alert_id and (a.status = 'triggered' or state.force_escalation) + join escalation_policies ep on ep.id = state.escalation_policy_id + join escalation_policy_steps step on + step.escalation_policy_id = state.escalation_policy_id and + step.step_number = CASE + WHEN state.escalation_policy_step_number >= ep.step_count THEN 0 + ELSE state.escalation_policy_step_number + END + where + state.last_escalation notnull and + escalation_policy_step_id isnull + for update skip locked + limit 100 + ), _cycles as ( + insert 
into notification_policy_cycles (alert_id, user_id) + select esc.alert_id, on_call.user_id + from to_escalate esc + join ep_step_on_call_users on_call on + on_call.end_time isnull and + on_call.ep_step_id = esc.ep_step_id + ), _channels as ( + insert into outgoing_messages (message_type, alert_id, service_id, escalation_policy_id, channel_id) + select + cast('alert_notification' as enum_outgoing_messages_type), + esc.alert_id, + esc.service_id, + esc.escalation_policy_id, + act.channel_id + from to_escalate esc + join escalation_policy_actions act on + act.channel_id notnull and + act.escalation_policy_step_id = esc.ep_step_id + ) + update escalation_policy_state state + set + last_escalation = now(), + next_escalation = now() + (cast(esc.delay as text)||' minutes')::interval, + escalation_policy_step_number = esc.step_number, + escalation_policy_step_id = esc.ep_step_id, + force_escalation = false + from + to_escalate esc + where + state.alert_id = esc.alert_id + returning esc.alert_id, esc.repeated, esc.step_number + `), + + normalEscalation: p.P(` + with to_escalate as ( + select + alert_id, + nextStep.id ep_step_id, + nextStep.delay, + nextStep.step_number, + force_escalation forced, + oldStep.delay old_delay, + oldStep.step_number + 1 >= ep.step_count repeated, + nextStep.escalation_policy_id, + a.service_id + from escalation_policy_state state + join alerts a on a.id = state.alert_id and (a.status = 'triggered' or state.force_escalation) + join escalation_policies ep on ep.id = state.escalation_policy_id + join escalation_policy_steps oldStep on oldStep.id = escalation_policy_step_id + join escalation_policy_steps nextStep on + nextStep.escalation_policy_id = state.escalation_policy_id and + nextStep.step_number = CASE + WHEN oldStep.step_number + 1 < ep.step_count THEN + oldStep.step_number + 1 + WHEN force_escalation OR ep.repeat = -1 THEN 0 + WHEN state.loop_count < ep.repeat THEN 0 + ELSE -1 + END + where + state.last_escalation notnull and + 
escalation_policy_step_id notnull and + (next_escalation < now() or force_escalation) + order by next_escalation - now() + for update skip locked + limit 500 + ), _cycles as ( + insert into notification_policy_cycles (alert_id, user_id) + select esc.alert_id, on_call.user_id + from to_escalate esc + join ep_step_on_call_users on_call on + on_call.end_time isnull and + on_call.ep_step_id = esc.ep_step_id + ), _channels as ( + insert into outgoing_messages (message_type, alert_id, service_id, escalation_policy_id, channel_id) + select + cast('alert_notification' as enum_outgoing_messages_type), + esc.alert_id, + esc.service_id, + esc.escalation_policy_id, + act.channel_id + from to_escalate esc + join escalation_policy_actions act on + act.channel_id notnull and + act.escalation_policy_step_id = esc.ep_step_id + ) + update escalation_policy_state state + set + last_escalation = now(), + next_escalation = now() + (cast(esc.delay as text)||' minutes')::interval, + escalation_policy_step_number = esc.step_number, + escalation_policy_step_id = esc.ep_step_id, + loop_count = CASE WHEN esc.repeated THEN loop_count + 1 ELSE loop_count END, + force_escalation = false + from + to_escalate esc + where + state.alert_id = esc.alert_id + returning esc.alert_id, esc.repeated, esc.step_number, esc.old_delay, esc.forced + `), + }, p.Err +} diff --git a/engine/escalationmanager/update.go b/engine/escalationmanager/update.go new file mode 100644 index 0000000000..fbed09894b --- /dev/null +++ b/engine/escalationmanager/update.go @@ -0,0 +1,118 @@ +package escalationmanager + +import ( + "context" + "database/sql" + alertlog "github.com/target/goalert/alert/log" + "github.com/target/goalert/permission" + "github.com/target/goalert/util/log" + + "github.com/pkg/errors" +) + +// UpdateAll will update the state of all active escalation policies. 
+func (db *DB) UpdateAll(ctx context.Context) error { + err := db.update(ctx, true, nil) + return err +} + +func (db *DB) update(ctx context.Context, all bool, alertID *int) error { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + log.Debugf(ctx, "Updating alert escalations.") + + tx, err := db.lock.BeginTx(ctx, nil) + if err != nil { + return errors.Wrap(err, "begin tx") + } + defer tx.Rollback() + + _, err = tx.StmtContext(ctx, db.lockStmt).ExecContext(ctx) + if err != nil { + return errors.Wrap(err, "lock ep step table") + } + _, err = tx.StmtContext(ctx, db.updateOnCall).ExecContext(ctx) + if err != nil { + return errors.Wrap(err, "update ep step on-call") + } + err = tx.Commit() + if err != nil { + return errors.Wrap(err, "commit on-call update") + } + + _, err = db.lock.Exec(ctx, db.cleanupNoSteps) + if err != nil { + return errors.Wrap(err, "end policies with no steps") + } + + err = db.processEscalations(ctx, db.newPolicies, func(rows *sql.Rows) (int, *alertlog.EscalationMetaData, error) { + var id int + err := rows.Scan(&id) + return id, &alertlog.EscalationMetaData{}, err + }) + if err != nil { + return errors.Wrap(err, "trigger new policies") + } + + err = db.processEscalations(ctx, db.deletedSteps, func(rows *sql.Rows) (int, *alertlog.EscalationMetaData, error) { + var id int + var meta alertlog.EscalationMetaData + err := rows.Scan(&id, &meta.Repeat, &meta.NewStepIndex) + return id, &meta, err + }) + if err != nil { + return errors.Wrap(err, "escalate policies with deleted steps") + } + + err = db.processEscalations(ctx, db.normalEscalation, func(rows *sql.Rows) (int, *alertlog.EscalationMetaData, error) { + var id int + var meta alertlog.EscalationMetaData + err := rows.Scan(&id, &meta.Repeat, &meta.NewStepIndex, &meta.OldDelayMinutes, &meta.Forced) + return id, &meta, err + }) + if err != nil { + return errors.Wrap(err, "escalate forced or expired") + } + + return nil +} + +func (db *DB) 
processEscalations(ctx context.Context, stmt *sql.Stmt, scan func(*sql.Rows) (int, *alertlog.EscalationMetaData, error)) error { + tx, err := db.lock.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + rows, err := tx.StmtContext(ctx, stmt).QueryContext(ctx) + if err != nil { + return err + } + defer rows.Close() + + type record struct { + alertID int + esc *alertlog.EscalationMetaData + } + + var data []record + for rows.Next() { + var rec record + rec.alertID, rec.esc, err = scan(rows) + if err != nil { + return err + } + data = append(data, rec) + } + + for _, rec := range data { + err = db.log.LogTx(ctx, tx, rec.alertID, alertlog.TypeEscalated, rec.esc) + if err != nil { + return errors.Wrap(err, "log escalation") + } + } + + return tx.Commit() +} diff --git a/engine/heartbeatmanager/db.go b/engine/heartbeatmanager/db.go new file mode 100644 index 0000000000..3da7938674 --- /dev/null +++ b/engine/heartbeatmanager/db.go @@ -0,0 +1,74 @@ +package heartbeatmanager + +import ( + "context" + "database/sql" + "github.com/target/goalert/alert" + "github.com/target/goalert/engine/processinglock" + "github.com/target/goalert/util" +) + +// DB processes heartbeats. +type DB struct { + lock *processinglock.Lock + + alertStore alert.Store + + fetchFailed *sql.Stmt + fetchHealthy *sql.Stmt +} + +// Name returns the name of the module. +func (db *DB) Name() string { return "Engine.HeartbeatManager" } + +// NewDB creates a new DB. 
+func NewDB(ctx context.Context, db *sql.DB, a alert.Store) (*DB, error) { + lock, err := processinglock.NewLock(ctx, db, processinglock.Config{ + Type: processinglock.TypeHeartbeat, + Version: 1, + }) + if err != nil { + return nil, err + } + + p := &util.Prepare{Ctx: ctx, DB: db} + + return &DB{ + lock: lock, + alertStore: a, + + // if checked is still false after processing, we can delete it + fetchFailed: p.P(` + with rows as ( + select id + from heartbeat_monitors + where + last_state != 'unhealthy' and + now() - last_heartbeat >= heartbeat_interval + limit 250 + for update skip locked + ) + update heartbeat_monitors mon + set last_state = 'unhealthy' + from rows + where mon.id = rows.id + returning mon.id, name, service_id, last_heartbeat + `), + fetchHealthy: p.P(` + with rows as ( + select id + from heartbeat_monitors + where + last_state != 'healthy' and + now() - last_heartbeat < heartbeat_interval + limit 250 + for update skip locked + ) + update heartbeat_monitors mon + set last_state = 'healthy' + from rows + where mon.id = rows.id + returning mon.id, service_id + `), + }, p.Err +} diff --git a/engine/heartbeatmanager/process.go b/engine/heartbeatmanager/process.go new file mode 100644 index 0000000000..0cc4936872 --- /dev/null +++ b/engine/heartbeatmanager/process.go @@ -0,0 +1,153 @@ +package heartbeatmanager + +import ( + "context" + "database/sql" + "fmt" + "github.com/target/goalert/alert" + "github.com/target/goalert/permission" + "github.com/target/goalert/util/log" + "time" + + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// UpdateAll will process all heartbeats opening and closing alerts as needed. 
+func (db *DB) UpdateAll(ctx context.Context) error { + err := db.processAll(ctx) + return err +} + +func (db *DB) processAll(ctx context.Context) error { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + log.Debugf(ctx, "Processing heartbeats.") + + tx, err := db.lock.BeginTx(ctx, nil) + if err != nil { + return errors.Wrap(err, "start transaction") + } + defer tx.Rollback() + + var newAlertCtx []context.Context + var newAlerts []alert.Alert + bad, err := db.unhealthy(ctx, tx) + if err != nil { + return errors.Wrap(err, "fetch unhealthy heartbeats") + } + for _, row := range bad { + a, isNew, err := db.alertStore.CreateOrUpdateTx(row.Context(ctx), tx, &alert.Alert{ + Summary: fmt.Sprintf("Heartbeat monitor '%s' expired.", row.Name), + Details: "Last heartbeat: " + row.LastHeartbeat.Format(time.UnixDate), + Status: alert.StatusTriggered, + ServiceID: row.ServiceID, + Dedup: &alert.DedupID{ + Type: alert.DedupTypeHeartbeat, + Version: 1, + Payload: row.ID, + }, + }) + if err != nil { + return errors.Wrap(err, "create alert") + } + if isNew { + // Store contexts with alert info for each alert that was newly-created. + newAlertCtx = append(newAlertCtx, log.WithFields(row.Context(ctx), log.Fields{ + "AlertID": a.ID, + "ServiceID": a.ServiceID, + })) + newAlerts = append(newAlerts, *a) + } + } + good, err := db.healthy(ctx, tx) + if err != nil { + return errors.Wrap(err, "fetch healthy heartbeats") + } + for _, row := range good { + _, _, err = db.alertStore.CreateOrUpdateTx(row.Context(ctx), tx, &alert.Alert{ + Status: alert.StatusClosed, + ServiceID: row.ServiceID, + Dedup: &alert.DedupID{ + Type: alert.DedupTypeHeartbeat, + Version: 1, + Payload: row.ID, + }, + }) + if err != nil { + return errors.Wrap(err, "close alert") + } + } + + err = tx.Commit() + if err != nil { + return err + } + + // log new alert creations, after the tx was committed without err. 
+ for _, ctx := range newAlertCtx { + log.Logf(ctx, "Alert created.") + + } + for _, n := range newAlerts { + trace.FromContext(ctx).Annotate( + []trace.Attribute{ + trace.StringAttribute("service.id", n.ServiceID), + trace.Int64Attribute("alert.id", int64(n.ID)), + }, + "Alert created.", + ) + } + return nil +} + +type row struct { + ID string + Name string + ServiceID string + LastHeartbeat time.Time +} + +func (r row) Context(ctx context.Context) context.Context { + return permission.ServiceSourceContext(permission.WithoutAuth(ctx), r.ServiceID, &permission.SourceInfo{ + Type: permission.SourceTypeHeartbeat, + ID: r.ID, + }) +} + +func (db *DB) unhealthy(ctx context.Context, tx *sql.Tx) ([]row, error) { + rows, err := tx.Stmt(db.fetchFailed).QueryContext(ctx) + if err != nil { + return nil, err + } + defer rows.Close() + var result []row + for rows.Next() { + var r row + err = rows.Scan(&r.ID, &r.Name, &r.ServiceID, &r.LastHeartbeat) + if err != nil { + return nil, err + } + result = append(result, r) + } + return result, nil +} +func (db *DB) healthy(ctx context.Context, tx *sql.Tx) ([]row, error) { + rows, err := tx.Stmt(db.fetchHealthy).QueryContext(ctx) + if err != nil { + return nil, err + } + defer rows.Close() + var result []row + for rows.Next() { + var r row + err = rows.Scan(&r.ID, &r.ServiceID) + if err != nil { + return nil, err + } + result = append(result, r) + } + return result, nil +} diff --git a/engine/message/config.go b/engine/message/config.go new file mode 100644 index 0000000000..d71a4bd652 --- /dev/null +++ b/engine/message/config.go @@ -0,0 +1,82 @@ +package message + +import ( + "github.com/target/goalert/app/lifecycle" + "github.com/target/goalert/notification" + "time" +) + +// RateConfig allows setting egress rate limiting on messages. +type RateConfig struct { + // PerSecond determines the target messages-per-second limit. + PerSecond int + + // Batch sets how often granularity of the rate limit. 
+ Batch time.Duration +} + +// Config is used to configure the message sender. +type Config struct { + // MaxMessagesPerCycle determines the number of pending messages + // fetched per-cycle for delivery. + // + // Defaults to 50. + MaxMessagesPerCycle int + + // RateLimit allows configuring rate limits per contact-method type. + RateLimit map[notification.DestType]*RateConfig + + // Pausable is optional, and allows early-abort of + // message sending when IsPaused returns true. + Pausable lifecycle.Pausable +} + +// batchNum returns the maximum number of messages to be sent per-cycle for the given type. +// If there is no limit, 0 is returned. +func (c Config) batchNum(t notification.DestType) int { + bDur := c.batch(t) + if bDur == 0 { + return 0 + } + pSec := c.perSecond(t) + if pSec == 0 { + return 0 + } + + max := int(bDur.Seconds() * float64(pSec)) + return max +} + +// perSecond returns the number of messages to send per-second. +func (c Config) perSecond(t notification.DestType) int { + cfg := c.RateLimit[t] + if cfg == nil { + return 0 + } + return cfg.PerSecond +} + +// batch returns the duration for a batch of messages. +func (c Config) batch(t notification.DestType) time.Duration { + cfg := c.RateLimit[t] + if cfg == nil { + return time.Duration(0) + } + return cfg.Batch +} + +// DefaultConfig returns the default configuration. 
+func DefaultConfig() *Config { + return &Config{ + RateLimit: map[notification.DestType]*RateConfig{ + notification.DestTypeSMS: &RateConfig{ + PerSecond: 1, + Batch: 5 * time.Second, + }, + notification.DestTypeVoice: &RateConfig{ + PerSecond: 1, + Batch: 5 * time.Second, + }, + }, + } +} diff --git a/engine/message/db.go b/engine/message/db.go new file mode 100644 index 0000000000..ab1289f4a1 --- /dev/null +++ b/engine/message/db.go @@ -0,0 +1,767 @@ +package message + +import ( + "context" + "database/sql" + "fmt" + "github.com/target/goalert/engine/processinglock" + "github.com/target/goalert/lock" + "github.com/target/goalert/notification" + "github.com/target/goalert/notificationchannel" + "github.com/target/goalert/permission" + "github.com/target/goalert/retry" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/util" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation/validate" + "time" + + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// DB implements a priority message sender using Postgres. +type DB struct { + lock *processinglock.Lock + + c Config + + stuckMessages *sql.Stmt + + setSending *sql.Stmt + + lockStmt *sql.Stmt + pending *sql.Stmt + currentTime *sql.Stmt + retryReset *sql.Stmt + retryClear *sql.Stmt + + sendDeadlineExpired *sql.Stmt + + failDisabledCM *sql.Stmt + + sentByCMType *sql.Stmt + + updateCMStatusUpdate *sql.Stmt + cleanupStatusUpdateOptOut *sql.Stmt + + tempFail *sql.Stmt + permFail *sql.Stmt + updateStatus *sql.Stmt + + advLock *sql.Stmt + advLockCleanup *sql.Stmt +} + +// NewDB creates a new DB. If config is nil, DefaultConfig() is used. 
+func NewDB(ctx context.Context, db *sql.DB, c *Config) (*DB, error) { + lock, err := processinglock.NewLock(ctx, db, processinglock.Config{ + Type: processinglock.TypeMessage, + Version: 6, + }) + if err != nil { + return nil, err + } + p := &util.Prepare{DB: db, Ctx: ctx} + + if c == nil { + c = DefaultConfig() + } + err = validate.Range("MaxMessagesPerCycle", c.MaxMessagesPerCycle, 0, 9000) + if err != nil { + return nil, err + } + if c.MaxMessagesPerCycle == 0 { + c.MaxMessagesPerCycle = 50 + } + + tempFail := p.P(` + update outgoing_messages + set + last_status = 'failed', + last_status_at = now(), + status_details = $3, + provider_msg_id = coalesce($2, provider_msg_id), + next_retry_at = CASE WHEN retry_count < 3 THEN now() + '15 seconds'::interval ELSE null END + where id = $1 or provider_msg_id = $2 + `) + permFail := p.P(` + update outgoing_messages + set + last_status = 'failed', + last_status_at = now(), + status_details = $3, + cycle_id = null, + provider_msg_id = coalesce($2, provider_msg_id), + next_retry_at = null + where id = $1 or provider_msg_id = $2 + `) + updateStatus := p.P(` + update outgoing_messages + set + last_status = cast($4 as enum_outgoing_messages_status), + last_status_at = now(), + status_details = $5, + cycle_id = null, + sending_deadline = null, + sent_at = coalesce(sent_at, fired_at, now()), + fired_at = null, + provider_msg_id = coalesce($2, provider_msg_id), + provider_seq = CASE WHEN $3 = -1 THEN provider_seq ELSE $3 END, + next_retry_at = null + where + (id = $1 or provider_msg_id = $2) and + (provider_seq <= $3 or $3 = -1) and + last_status not in ('failed', 'pending') + `) + if p.Err != nil { + return nil, p.Err + } + return &DB{ + lock: lock, + c: *c, + + updateStatus: updateStatus, + tempFail: tempFail, + permFail: permFail, + + advLock: p.P(`select pg_advisory_lock($1)`), + advLockCleanup: p.P(` + select pg_terminate_backend(lock.pid) + from pg_locks lock + join pg_database pgdat on + datname = current_database() and + 
lock.database = pgdat.oid + join pg_stat_activity act on + act.datid = pgdat.oid and + act.pid = lock.pid and + act.state = 'idle' and + act.state_change < now() - '1 minute'::interval + where objid = $1 and locktype = 'advisory' and granted + `), + + stuckMessages: p.P(` + with sel as ( + select id, provider_msg_id + from outgoing_messages msg + where + last_status = 'queued_remotely' and + last_status_at < now()-'1 minute'::interval and + provider_msg_id notnull + order by + last_status_at + limit 10 + for update + ) + update outgoing_messages msg + set last_status_at = now() + from sel + where msg.id = sel.id + returning msg.id, msg.provider_msg_id + `), + + sentByCMType: p.P(` + select count(*) + from outgoing_messages msg + join user_contact_methods cm on cm.id = msg.contact_method_id + where msg.sent_at > $1 and cm.type = $2 + `), + + updateCMStatusUpdate: p.P(` + update outgoing_messages msg + set contact_method_id = usr.alert_status_log_contact_method_id + from users usr + where + msg.message_type = 'alert_status_update' and + ( + msg.last_status = 'pending' or + (msg.last_status = 'failed' and msg.next_retry_at notnull) + ) and + msg.contact_method_id != usr.alert_status_log_contact_method_id and + msg.user_id = usr.id and + usr.alert_status_log_contact_method_id notnull + `), + cleanupStatusUpdateOptOut: p.P(` + delete from outgoing_messages msg + using users usr + where + msg.message_type = 'alert_status_update' and + ( + msg.last_status = 'pending' or + (msg.last_status = 'failed' and msg.next_retry_at notnull) + ) and + usr.alert_status_log_contact_method_id isnull and + usr.id = msg.user_id + `), + setSending: p.P(` + update outgoing_messages + set + last_status = 'sending', + last_status_at = now(), + status_details = '', + sending_deadline = now() + '10 seconds'::interval, + fired_at = now(), + provider_seq = 0, + provider_msg_id = null, + next_retry_at = null + where id = $1 + `), + + sendDeadlineExpired: p.P(` + update outgoing_messages + set + 
last_status = 'failed', + last_status_at = now(), + status_details = 'send deadline expired', + cycle_id = null, + next_retry_at = null + where + last_status = 'sending' and + sending_deadline <= now() + `), + retryReset: p.P(` + update outgoing_messages + set + last_status = 'pending', + status_details = '', + next_retry_at = null, + retry_count = retry_count + 1, + fired_at = null, + sent_at = null, + provider_msg_id = null, + provider_seq = 0 + where + last_status = 'failed' and + now() > next_retry_at and + retry_count < 3 + `), + retryClear: p.P(` + update outgoing_messages + set + next_retry_at = null, + cycle_id = null + where + last_status = 'failed' and + retry_count >= 3 and + (cycle_id notnull or next_retry_at notnull) + `), + + lockStmt: p.P(`lock outgoing_messages in exclusive mode`), + currentTime: p.P(`select now()`), + + failDisabledCM: p.P(` + update outgoing_messages msg + set + last_status = 'failed', + last_status_at = now(), + status_details = 'contact method disabled', + cycle_id = null, + next_retry_at = null + from user_contact_methods cm + where + msg.last_status = 'pending' and + msg.message_type != 'verification_message' and + cm.id = msg.contact_method_id and + cm.disabled + `), + pending: p.P(fmt.Sprintf(` + select + msg.id, + msg.message_type, + msg.contact_method_id, + cm.type, + msg.alert_id, + msg.alert_log_id, + msg.user_verification_code_id, + msg.channel_id, + chan.type + from outgoing_messages msg + left join user_contact_methods cm on cm.id = msg.contact_method_id + left join notification_channels chan on chan.id = msg.channel_id + where last_status = 'pending' and (not cm isnull or not chan isnull) + order by + msg.message_type, + (select max(sent_at) from outgoing_messages om where om.escalation_policy_id = msg.escalation_policy_id) nulls first, + (select max(sent_at) from outgoing_messages om where om.service_id = msg.service_id) nulls first, + (select max(sent_at) from outgoing_messages om where om.alert_id = msg.alert_id) 
nulls first, + channel_id, + (select max(sent_at) from outgoing_messages om where om.user_id = msg.user_id) nulls first, + (select max(sent_at) from outgoing_messages om where om.contact_method_id = msg.contact_method_id) nulls first, + msg.created_at, + msg.alert_id, + msg.alert_log_id, + msg.contact_method_id + limit %d + `, c.MaxMessagesPerCycle)), + }, p.Err +} + +func (db *DB) getRows(ctx context.Context, tx *sql.Tx) ([]row, error) { + rows, err := tx.Stmt(db.pending).QueryContext(ctx) + if err != nil { + return nil, errors.Wrap(err, "fetch outgoing messages") + } + defer rows.Close() + + var result []row + var r row + for rows.Next() { + var cmID, cmType, chID, chType sql.NullString + err = rows.Scan( + &r.ID, + &r.Type, + &cmID, + &cmType, + &r.AlertID, + &r.AlertLogID, + &r.VerifyID, + &chID, + &chType, + ) + if err != nil { + return nil, errors.Wrap(err, "scan row") + } + switch { + case cmType.String == string(contactmethod.TypeSMS): + r.DestType = notification.DestTypeSMS + r.DestID = cmID.String + case cmType.String == string(contactmethod.TypeVoice): + r.DestType = notification.DestTypeVoice + r.DestID = cmID.String + case chType.String == string(notificationchannel.TypeSlack): + r.DestType = notification.DestTypeSlackChannel + r.DestID = chID.String + default: + log.Debugf(ctx, "unknown message type for message %s", r.ID) + continue + } + + result = append(result, r) + } + + return result, nil +} + +// UpdateMessageStatus will update the state of a message. 
+func (db *DB) UpdateMessageStatus(ctx context.Context, status *notification.MessageStatus) error { + return retry.DoTemporaryError(func(int) error { + return db._UpdateMessageStatus(ctx, status) + }, + retry.Log(ctx), + retry.Limit(10), + retry.FibBackoff(time.Millisecond*100), + ) +} +func (db *DB) _UpdateMessageStatus(ctx context.Context, status *notification.MessageStatus) error { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + var cbID, pID sql.NullString + if status.ID != "" { + cbID.Valid = true + cbID.String = status.ID + } + if status.ProviderMessageID != "" { + pID.Valid = true + pID.String = status.ProviderMessageID + } + + if status.State == notification.MessageStateFailedTemp { + _, err = db.tempFail.ExecContext(ctx, cbID, pID, status.Details) + return err + } + if status.State == notification.MessageStateFailedPerm { + _, err = db.permFail.ExecContext(ctx, cbID, pID, status.Details) + return err + } + + var s Status + switch status.State { + case notification.MessageStateActive: + s = StatusQueuedRemotely + case notification.MessageStateSent: + s = StatusSent + case notification.MessageStateDelivered: + s = StatusDelivered + } + + _, err = db.updateStatus.ExecContext(ctx, cbID, pID, status.Sequence, s, status.Details) + return err +} + +// SendFunc defines a function that sends messages. +type SendFunc func(context.Context, *Message) (*notification.MessageStatus, error) + +// ErrAbort is returned when an early-abort is returned due to pause. +var ErrAbort = errors.New("aborted due to pause") + +// StatusFunc is used to fetch the latest status of a message. +type StatusFunc func(ctx context.Context, id, providerMsgID string) (*notification.MessageStatus, error) + +// SendMessages will send notifications using SendFunc. 
+func (db *DB) SendMessages(ctx context.Context, send SendFunc, status StatusFunc) error { + err := db._SendMessages(ctx, send, status) + if db.c.Pausable.IsPausing() { + return ErrAbort + } + return err +} + +func (db *DB) _SendMessages(ctx context.Context, send SendFunc, status StatusFunc) error { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + log.Debugf(ctx, "Sending outgoing messages.") + + execCtx, execCancel := context.WithCancel(ctx) + execDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + case <-db.c.Pausable.PauseWait(): + case <-execDone: + } + execCancel() + }() + + res, err := db.advLockCleanup.ExecContext(execCtx, lock.GlobalMessageSending) + if err != nil { + return errors.Wrap(err, "terminate stale backend locks") + } + rows, _ := res.RowsAffected() + if rows > 0 { + log.Log(execCtx, errors.Errorf("terminated %d stale backend instance(s) holding message sending lock", rows)) + } + + cLock, err := db.lock.Conn(execCtx) + if err != nil { + return errors.Wrap(err, "get DB conn") + } + defer cLock.Close() + + _, err = cLock.Exec(execCtx, db.advLock, lock.GlobalMessageSending) + if err != nil { + return errors.Wrap(err, "acquire global sending advisory lock") + } + defer func() { + ctx := trace.NewContext(context.Background(), trace.FromContext(execCtx)) + cLock.ExecWithoutLock(ctx, `select pg_advisory_unlock_all()`) + }() + + tx, err := cLock.BeginTx(execCtx, nil) + if err != nil { + return errors.Wrap(err, "begin transaction") + } + defer tx.Rollback() + + _, err = tx.Stmt(db.lockStmt).ExecContext(execCtx) + if err != nil { + return errors.Wrap(err, "acquire exclusive locks") + } + + var t time.Time + err = tx.Stmt(db.currentTime).QueryRowContext(execCtx).Scan(&t) + if err != nil { + return errors.Wrap(err, "get current time") + } + + _, err = tx.Stmt(db.updateCMStatusUpdate).ExecContext(execCtx) + if err != nil { + return errors.Wrap(err, "update status update CM preferences") + } 
+ + _, err = tx.Stmt(db.cleanupStatusUpdateOptOut).ExecContext(execCtx) + if err != nil { + return errors.Wrap(err, "clear disabled status updates") + } + + _, err = tx.Stmt(db.failDisabledCM).ExecContext(execCtx) + if err != nil { + return errors.Wrap(err, "check for disabled CMs") + } + + _, err = tx.Stmt(db.sendDeadlineExpired).ExecContext(ctx) + if err != nil { + return errors.Wrap(err, "fail expired messages") + } + + _, err = tx.Stmt(db.retryClear).ExecContext(ctx) + if err != nil { + return errors.Wrap(err, "clear max retries") + } + + _, err = tx.Stmt(db.retryReset).ExecContext(execCtx) + if err != nil { + return errors.Wrap(err, "reset retry messages") + } + + msgs, err := db.getRows(ctx, tx) + if err != nil { + return errors.Wrap(err, "get pending messages") + } + + counts := make(batchCounts, len(db.c.RateLimit)) + for cType, cfg := range db.c.RateLimit { + if cfg == nil || cfg.Batch <= 0 || cfg.PerSecond < 1 || !cType.IsUserCM() { + continue + } + var c int + err = tx.Stmt(db.sentByCMType).QueryRowContext(execCtx, t.Add(-cfg.Batch), contactmethod.TypeFromDestType(cType)).Scan(&c) + if err != nil { + return errors.Wrap(err, "get sent message count") + } + counts[cType] = c + } + + err = tx.Commit() + if err != nil { + return errors.Wrap(err, "commit message updates") + } + + if len(msgs) > 0 { + msgByType := make(map[notification.DestType][]row) + + for _, m := range msgs { + msgByType[m.DestType] = append(msgByType[m.DestType], m) + } + + // ensure we cancel sending other messages on err + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // ensure the buffer is large enough to hold all responses, even if we exit on err + // otherwise the goroutine will hang and be a memory leak + errCh := make(chan error, len(msgByType)) + + for typ, rows := range msgByType { + toSend := db.c.batchNum(typ) // max messages per cycle + if toSend == 0 { + // no limit + go db.sendAllMessages(ctx, cLock, send, rows, 0, errCh) + continue + } + + toSend -= 
counts[typ] + if toSend <= 0 { + // nothing to send + errCh <- nil + continue + } + + // only send remaining in queue + go db.sendAllMessages(ctx, cLock, send, rows, toSend, errCh) + } + + n := 0 + for err := range errCh { + n++ + if err != nil && errors.Cause(err) != processinglock.ErrNoLock { + log.Log(ctx, errors.Wrap(err, "send")) + } + // jump out once we've completed all types + if n == len(msgByType) { + break + } + } + } + + return db.updateStuckMessages(ctx, status) +} + +func (db *DB) refreshMessageState(ctx context.Context, statusFn StatusFunc, id, providerMsgID string, res chan *notification.MessageStatus) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + status, err := statusFn(ctx, id, providerMsgID) + if err != nil { + res <- &notification.MessageStatus{ + Ctx: ctx, + ID: id, + ProviderMessageID: providerMsgID, + State: notification.MessageStateActive, + Details: "failed to update status: " + err.Error(), + Sequence: -1, + } + return + } + stat := *status + if stat.State == notification.MessageStateFailedTemp { + stat.State = notification.MessageStateFailedPerm + } + stat.Sequence = -1 + stat.Ctx = ctx + res <- &stat +} +func (db *DB) updateStuckMessages(ctx context.Context, statusFn StatusFunc) error { + tx, err := db.lock.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + rows, err := tx.Stmt(db.stuckMessages).QueryContext(ctx) + if err != nil { + return err + } + defer rows.Close() + + type msg struct{ id, pID string } + var toCheck []msg + for rows.Next() { + var m msg + err = rows.Scan(&m.id, &m.pID) + if err != nil { + return err + } + toCheck = append(toCheck, m) + } + + err = tx.Commit() + if err != nil { + return err + } + + ch := make(chan *notification.MessageStatus, len(toCheck)) + for _, m := range toCheck { + go db.refreshMessageState(ctx, statusFn, m.id, m.pID, ch) + } + + for range toCheck { + err := db._UpdateMessageStatus(ctx, <-ch) + if err != nil { + log.Log(ctx, 
errors.Wrap(err, "update stale message status")) + } + } + + return nil +} + +func (db *DB) sendAllMessages(ctx context.Context, cLock *processinglock.Conn, send SendFunc, rows []row, count int, errCh chan error) { + type sendResult struct { + sent bool + err error + } + + ch := make(chan sendResult, len(rows)) // ensure we can store all responses if needed + doSend := func(r row) { + var res sendResult + res.sent, res.err = db.sendMessage(ctx, cLock, send, &r) + ch <- res + } + + var sent int + var pending int + for i, m := range rows { + if db.c.Pausable.IsPausing() { + // abort due to pause + break + } + go doSend(m) + pending++ + if i < 20 && (i < count || count == 0) { + continue + } + + res := <-ch + pending-- + if res.err != nil { + errCh <- res.err + return + } + if res.sent { + sent++ + } + if count > 0 && sent == count { + break + } + } + + for ; pending > 0; pending-- { + // check remaining responses for errors + res := <-ch + if res.err != nil { + errCh <- res.err + return + } + } + + errCh <- nil +} + +func (db *DB) sendMessage(ctx context.Context, cLock *processinglock.Conn, send SendFunc, m *row) (bool, error) { + ctx, sp := trace.StartSpan(ctx, "Engine.MessageManager.SendMessage") + defer sp.End() + ctx = log.WithFields(ctx, log.Fields{ + "DestTypeID": m.DestID, + "DestType": m.DestType.String(), + "CallbackID": m.ID, + }) + sp.AddAttributes( + trace.StringAttribute("message.dest.id", m.DestID), + trace.StringAttribute("message.dest.type", m.DestType.String()), + trace.StringAttribute("message.callback.id", m.ID), + ) + var alertID int + if m.AlertID.Valid { + alertID = int(m.AlertID.Int64) + ctx = log.WithField(ctx, "AlertID", alertID) + } + _, err := cLock.Exec(ctx, db.setSending, m.ID) + if err != nil { + return false, err + } + sCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + var status *notification.MessageStatus + err = retry.DoTemporaryError(func(int) error { + status, err = send(sCtx, &Message{ + ID: m.ID, + Type: m.Type, + 
DestType: m.DestType, + DestID: m.DestID, + AlertID: alertID, + AlertLogID: int(m.AlertLogID.Int64), + VerifyID: m.VerifyID.String, + }) + return err + }, + retry.Log(ctx), + retry.Limit(10), + retry.FibBackoff(65*time.Millisecond), + ) + cancel() + + var pID sql.NullString + if status != nil && status.ProviderMessageID != "" { + pID.Valid = true + pID.String = status.ProviderMessageID + } + + retryExec := func(s *sql.Stmt, args ...interface{}) error { + return retry.DoTemporaryError(func(int) error { + _, err := s.ExecContext(ctx, args...) + return err + }, + retry.Limit(15), + retry.FibBackoff(time.Millisecond*50), + ) + } + if err != nil { + log.Log(ctx, errors.Wrap(err, "send message")) + + err = retryExec(db.tempFail, m.ID, pID, err.Error()) + return false, errors.Wrap(err, "mark failed message") + } + + if status.State == notification.MessageStateFailedTemp { + err = retryExec(db.tempFail, m.ID, pID, status.Details) + return false, errors.Wrap(err, "mark failed message (temp)") + } + if status.State == notification.MessageStateFailedPerm { + err = retryExec(db.permFail, m.ID, pID, status.Details) + return false, errors.Wrap(err, "mark failed message (perm)") + } + + return true, errors.Wrap(db.UpdateMessageStatus(ctx, status), "update message status") +} diff --git a/engine/message/message.go b/engine/message/message.go new file mode 100644 index 0000000000..b60128afc2 --- /dev/null +++ b/engine/message/message.go @@ -0,0 +1,16 @@ +package message + +import ( + "github.com/target/goalert/notification" +) + +// Message represents the data for an outgoing message. 
+type Message struct { + ID string + Type Type + DestType notification.DestType + DestID string + AlertID int + AlertLogID int + VerifyID string +} diff --git a/engine/message/row.go b/engine/message/row.go new file mode 100644 index 0000000000..d57c4b3c20 --- /dev/null +++ b/engine/message/row.go @@ -0,0 +1,20 @@ +package message + +import ( + "database/sql" + "github.com/target/goalert/notification" + "time" +) + +type batchCounts map[notification.DestType]int + +type row struct { + ID string + CreatedAt time.Time + Type Type + DestType notification.DestType + DestID string + AlertID sql.NullInt64 + AlertLogID sql.NullInt64 + VerifyID sql.NullString +} diff --git a/engine/message/status.go b/engine/message/status.go new file mode 100644 index 0000000000..167c2bb4a0 --- /dev/null +++ b/engine/message/status.go @@ -0,0 +1,37 @@ +package message + +// Status represents the current state of an outgoing message. +type Status string + +// Defined status values +const ( + // StatusPending means the message is waiting to be sent. + StatusPending = Status("pending") + + // StatusSending means the message is in the process of being sent upstream + StatusSending = Status("sending") + + // StatusQueuedRemotely means the message has been sent upstream, but is in a remote queue. + StatusQueuedRemotely = Status("queued_remotely") + + // StatusSent means the message has been sent upstream, and has left the remote queue (if one exists). + StatusSent = Status("sent") + + // StatusDelivered will be set on delivery if the upstream supports delivery confirmation. + StatusDelivered = Status("delivered") + + // StatusFailed means the message failed to send. + StatusFailed = Status("failed") + + // StatusStale is used if the message expired before being sent. + StatusStale = Status("stale") +) + +// IsSent returns true if the message has been successfully sent to the downstream server. 
+func (s Status) IsSent() bool { + switch s { + case StatusQueuedRemotely, StatusDelivered, StatusSent: + return true + } + return false +} diff --git a/engine/message/type.go b/engine/message/type.go new file mode 100644 index 0000000000..8dd26a1cec --- /dev/null +++ b/engine/message/type.go @@ -0,0 +1,12 @@ +package message + +// Type represents the purpose of a message in the outgoing messages queue. +type Type string + +// defined message types +const ( + TypeAlertNotification Type = "alert_notification" + TypeTestNotification Type = "test_notification" + TypeVerificationMessage Type = "verification_message" + TypeAlertStatusUpdate Type = "alert_status_update" +) diff --git a/engine/npcyclemanager/db.go b/engine/npcyclemanager/db.go new file mode 100644 index 0000000000..32ab466276 --- /dev/null +++ b/engine/npcyclemanager/db.go @@ -0,0 +1,110 @@ +package npcyclemanager + +import ( + "context" + "database/sql" + "github.com/target/goalert/engine/processinglock" + "github.com/target/goalert/util" +) + +// DB manages user notification cycles in Postgres. +// +// It handles queueing of notifications. +type DB struct { + lock *processinglock.Lock + + queueMessages *sql.Stmt +} + +// Name returns the name of the module. +func (db *DB) Name() string { return "Engine.NotificationCycleManager" } + +// NewDB creates a new DB. +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + lock, err := processinglock.NewLock(ctx, db, processinglock.Config{ + Type: processinglock.TypeNPCycle, + Version: 2, + }) + if err != nil { + return nil, err + } + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + lock: lock, + + // add messages for notification rules who's delay is between the last tick and now. 
+ // + // Example: + // - policy started at 1:00 + // - notifications were sent for 0-minute at 1:00:15 (last tick = 1:00:15) + // - at 1:01:15 only notification rules with delays between 15 and 75 seconds would be processed/sent + // Note: since delays are in minutes, the above example would just send the 1 minute rules (60 seconds) + queueMessages: p.P(` + with lock_cycles as ( + select + id, + alert_id, + user_id, + started_at, + last_tick + from notification_policy_cycles + where + last_tick isnull or + last_tick < now() - '1 minute'::interval + order by + last_tick nulls first, + started_at + for update skip locked + limit 1250 + ), deleted as ( + delete from notification_policy_cycles cycle + using alerts a, lock_cycles lock + where + a.status != 'triggered' and a.id = cycle.alert_id and + cycle.id = lock.id + returning cycle.id + ), process_cycles as ( + select * + from lock_cycles lock + where not exists ( + select null + from deleted del + where lock.id = del.id + ) + ), inserted as ( + insert into outgoing_messages ( + message_type, + contact_method_id, + alert_id, + cycle_id, + user_id, + service_id, + escalation_policy_id + ) + select distinct + cast('alert_notification' as enum_outgoing_messages_type), + rule.contact_method_id, + cycle.alert_id, + cycle.id, + rule.user_id, + a.service_id, + svc.escalation_policy_id + from process_cycles cycle + join alerts a on a.id = cycle.alert_id + join services svc on svc.id = a.service_id + join user_notification_rules rule on + rule.user_id = cycle.user_id and + ( + cycle.last_tick isnull or + concat(rule.delay_minutes,' minutes')::interval > (cycle.last_tick - cycle.started_at) + ) and + concat(rule.delay_minutes,' minutes')::interval <= (now() - cycle.started_at) + returning cycle_id + ) + update notification_policy_cycles + set last_tick = greatest(last_tick, now()) + where id in (select id from process_cycles) + `), + }, p.Err +} diff --git a/engine/npcyclemanager/update.go b/engine/npcyclemanager/update.go 
new file mode 100644 index 0000000000..b371b54535 --- /dev/null +++ b/engine/npcyclemanager/update.go @@ -0,0 +1,32 @@ +package npcyclemanager + +import ( + "context" + "github.com/target/goalert/permission" + "github.com/target/goalert/util/log" + + "github.com/pkg/errors" +) + +// UpdateAll will update and cleanup all notification cycles. +func (db *DB) UpdateAll(ctx context.Context) error { + err := db.update(ctx, true, nil) + return err +} + +// UpdateOneAlert will update and cleanup all notification cycles for the given alert. +func (db *DB) UpdateOneAlert(ctx context.Context, alertID int) error { + ctx = log.WithField(ctx, "AlertID", alertID) + return db.update(ctx, false, &alertID) +} + +func (db *DB) update(ctx context.Context, all bool, alertID *int) error { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + log.Debugf(ctx, "Updating notification cycles.") + + _, err = db.lock.Exec(ctx, db.queueMessages) + return errors.Wrap(err, "queue outgoing messages") +} diff --git a/engine/processinglock/config.go b/engine/processinglock/config.go new file mode 100644 index 0000000000..6d73bb7491 --- /dev/null +++ b/engine/processinglock/config.go @@ -0,0 +1,32 @@ +package processinglock + +import ( + "fmt" + + "go.opencensus.io/trace" +) + +// Config defines the parameters of the lock. +type Config struct { + Type Type + Version int // Version must match the value in engine_processing_versions exactly or no lock will be obtained. +} + +// String returns the string representation of Config. +func (cfg Config) String() string { + return fmt.Sprintf("%s:v%d", cfg.Type, cfg.Version) +} + +func (cfg Config) spanAttrs(extra ...trace.Attribute) []trace.Attribute { + return append([]trace.Attribute{ + trace.StringAttribute("processingLock.type", string(cfg.Type)), + trace.Int64Attribute("processingLock.version", int64(cfg.Version)), + }, extra...) 
+} +func (cfg Config) decorateSpan(sp *trace.Span) { + if sp == nil { + return + } + + sp.AddAttributes(cfg.spanAttrs()...) +} diff --git a/engine/processinglock/conn.go b/engine/processinglock/conn.go new file mode 100644 index 0000000000..18650cc149 --- /dev/null +++ b/engine/processinglock/conn.go @@ -0,0 +1,59 @@ +package processinglock + +import ( + "context" + "database/sql" + "sync" +) + +// Conn allows using locked transactions over a single connection. +type Conn struct { + l *Lock + conn *sql.Conn + mx sync.Mutex +} + +// Conn returns a new connection from the DB pool. +// +// Note: No version checking/locking is done until a transaction is started. +func (l *Lock) Conn(ctx context.Context) (*Conn, error) { + c, err := l.db.Conn(ctx) + if err != nil { + return nil, err + } + _, err = c.ExecContext(ctx, `SET idle_in_transaction_session_timeout = 3000`) + if err != nil { + c.Close() + return nil, err + } + + _, err = c.ExecContext(ctx, `SET lock_timeout = 8000`) + if err != nil { + c.Close() + return nil, err + } + + return &Conn{l: l, conn: c}, nil +} + +// BeginTx will start a new transaction. +func (c *Conn) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) { + return c.l._BeginTx(ctx, c.conn, opts) +} + +// Exec will call ExecContext on the statement wrapped in a locked transaction. +func (c *Conn) Exec(ctx context.Context, stmt *sql.Stmt, args ...interface{}) (sql.Result, error) { + c.mx.Lock() + defer c.mx.Unlock() + return c.l._Exec(ctx, c.conn, stmt, args...) +} + +// ExecWithoutLock will run a query directly on the connection (no Tx or locking). +func (c *Conn) ExecWithoutLock(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + c.mx.Lock() + defer c.mx.Unlock() + return c.conn.ExecContext(ctx, query, args...) +} + +// Close returns the connection to the pool. 
+func (c *Conn) Close() error { return c.conn.Close() } diff --git a/engine/processinglock/error.go b/engine/processinglock/error.go new file mode 100644 index 0000000000..87ccbc7dcd --- /dev/null +++ b/engine/processinglock/error.go @@ -0,0 +1,11 @@ +package processinglock + +import ( + "github.com/pkg/errors" +) + +// Static errors +var ( + // ErrNoLock is returned when a lock can not be acquired due to normal causes. + ErrNoLock = errors.New("advisory lock already taken or incompatible version") +) diff --git a/engine/processinglock/lock.go b/engine/processinglock/lock.go new file mode 100644 index 0000000000..3a866fb861 --- /dev/null +++ b/engine/processinglock/lock.go @@ -0,0 +1,94 @@ +package processinglock + +import ( + "context" + "database/sql" + "github.com/target/goalert/util" + + "github.com/lib/pq" + "go.opencensus.io/trace" +) + +// A Lock is used to start "locked" transactions. +type Lock struct { + cfg Config + db *sql.DB + lockStmt *sql.Stmt +} +type txBeginner interface { + BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) +} + +// NewLock will return a new Lock for the given Config. 
+func NewLock(ctx context.Context, db *sql.DB, cfg Config) (*Lock, error) { + p := &util.Prepare{Ctx: ctx, DB: db} + return &Lock{ + db: db, + cfg: cfg, + lockStmt: p.P(` + select version + from engine_processing_versions + where type_id = $1 + for update nowait + `), + }, p.Err +} + +func (l *Lock) _BeginTx(ctx context.Context, b txBeginner, opts *sql.TxOptions) (*sql.Tx, error) { + ctx, sp := trace.StartSpan(ctx, "ProcessingLock.BeginTx") + defer sp.End() + l.cfg.decorateSpan(sp) + + tx, err := b.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + + var dbVersion int + err = tx.StmtContext(ctx, l.lockStmt).QueryRowContext(ctx, l.cfg.Type).Scan(&dbVersion) + if err != nil { + tx.Rollback() + // 55P03 is lock_not_available (due to the `nowait` in the query) + // + // https://www.postgresql.org/docs/9.4/static/errcodes-appendix.html + if pqErr, ok := err.(*pq.Error); ok && pqErr.Code == "55P03" { + return nil, ErrNoLock + } + return nil, err + } + if dbVersion != l.cfg.Version { + tx.Rollback() + return nil, ErrNoLock + } + + return tx, nil +} +func (l *Lock) _Exec(ctx context.Context, b txBeginner, stmt *sql.Stmt, args ...interface{}) (sql.Result, error) { + tx, err := l._BeginTx(ctx, b, nil) + if err != nil { + return nil, err + } + defer tx.Rollback() + + res, err := tx.StmtContext(ctx, stmt).ExecContext(ctx, args...) + if err != nil { + return nil, err + } + + err = tx.Commit() + if err != nil { + return nil, err + } + + return res, nil +} + +// BeginTx will start a transaction with the appropriate lock in place (based on Config). +func (l *Lock) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) { + return l._BeginTx(ctx, l.db, opts) +} + +// Exec will run ExecContext on the statement, wrapped in a locked transaction. +func (l *Lock) Exec(ctx context.Context, stmt *sql.Stmt, args ...interface{}) (sql.Result, error) { + return l._Exec(ctx, l.db, stmt, args...) 
+} diff --git a/engine/processinglock/type.go b/engine/processinglock/type.go new file mode 100644 index 0000000000..2a634d142e --- /dev/null +++ b/engine/processinglock/type.go @@ -0,0 +1,81 @@ +package processinglock + +import ( + "database/sql/driver" + "fmt" + "github.com/target/goalert/validation/validate" +) + +// Type indicates the lock type. For TypeMessage, the RegionID is used. +type Type string + +// Recognized types +const ( + TypeEscalation Type = "escalation" + TypeHeartbeat Type = "heartbeat" + TypeNPCycle Type = "np_cycle" + TypeRotation Type = "rotation" + TypeSchedule Type = "schedule" + TypeStatusUpdate Type = "status_update" + TypeVerify Type = "verify" + TypeMessage Type = "message" + TypeCleanup Type = "cleanup" +) + +func (t Type) validate() error { + return validate.OneOf("Type", t, + TypeEscalation, + TypeHeartbeat, + TypeNPCycle, + TypeRotation, + TypeSchedule, + TypeStatusUpdate, + TypeVerify, + TypeMessage, + TypeCleanup, + ) +} + +// Value will return the DB enum value of the Type. +func (t Type) Value() (driver.Value, error) { + return string(t), t.validate() +} + +// Scan will scan a DB enum value into Type. +func (t *Type) Scan(value interface{}) error { + switch _t := value.(type) { + case []byte: + *t = Type(_t) + case string: + *t = Type(_t) + default: + return fmt.Errorf("could not process unknown type for Type(%T)", t) + } + return t.validate() +} + +// LockID returns the int value used for the advisory lock for the Type. 
+func (t Type) LockID() int { + switch t { + case TypeEscalation: + return 0x1000 // 4096 + case TypeHeartbeat: + return 0x1010 // 4112 + case TypeNPCycle: + return 0x1020 // 4128 + case TypeRotation: + return 0x1030 // 4144 + case TypeSchedule: + return 0x1040 // 4160 + case TypeStatusUpdate: + return 0x1050 // 4176 + case TypeVerify: + return 0x1060 // 4192 + case TypeMessage: + return 0x1070 // 4208 + case TypeCleanup: + return 0x1080 // 4224 + } + + panic("invalid type") +} diff --git a/engine/resolver/resolver.go b/engine/resolver/resolver.go new file mode 100644 index 0000000000..b07d94e572 --- /dev/null +++ b/engine/resolver/resolver.go @@ -0,0 +1,402 @@ +package resolver + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + + "github.com/pkg/errors" +) + +type Resolver interface { + AlertEPID(context.Context, int) (string, error) + IsUserOnCall(ctx context.Context, userID string) (bool, error) + OnCallByUser(ctx context.Context, userID string) ([]OnCallAssignment, error) +} + +type OnCallAssignment struct { + ServiceID string `json:"service_id"` + ServiceName string `json:"service_name"` + EPID string `json:"escalation_policy_id"` + EPName string `json:"escalation_policy_name"` + Level int `json:"escalation_policy_step_number"` + RotationID string `json:"rotation_id"` + RotationName string `json:"rotation_name"` + ScheduleID string `json:"schedule_id"` + ScheduleName string `json:"schedule_name"` + UserID string `json:"user_id"` + IsActive bool `json:"is_active"` +} + +type DB struct { + db *sql.DB + + epID *sql.Stmt + + isOnCall *sql.Stmt + + onCallEPDirect *sql.Stmt + onCallEPRot *sql.Stmt + + onCallRemoveOverrides *sql.Stmt + onCallAddOverrideAssignments *sql.Stmt + onCallReplaceOverrideAssignments *sql.Stmt + onCallDirectAssignments 
*sql.Stmt + + rules rule.Store + sched schedule.Store +} + +func NewDB(ctx context.Context, db *sql.DB, rules rule.Store, sched schedule.Store) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + return &DB{ + db: db, + + rules: rules, + sched: sched, + + isOnCall: p.P(` + select 1 + from services svc + join escalation_policy_steps step on step.escalation_policy_id = svc.escalation_policy_id + join escalation_policy_actions act on act.escalation_policy_step_id = step.id + left join schedule_on_call_users sUser on + sUser.schedule_id = act.schedule_id and + sUser.user_id = $1 and + sUser.end_time isnull + left join rotation_state rState on rState.rotation_id = act.rotation_id + left join rotation_participants part on + part.id = rState.rotation_participant_id and + part.user_id = $1 + where coalesce(act.user_id, sUser.user_id, part.user_id) = $1 + limit 1 + `), + + onCallEPDirect: p.P(` + select distinct + svc.id, + svc.name, + ep.id, + ep.name, + step.step_number, + null, + null, + null, + null, + true + from services svc + join escalation_policies ep on ep.id = svc.escalation_policy_id + join escalation_policy_steps step on step.escalation_policy_id = svc.escalation_policy_id + join escalation_policy_actions act on + act.escalation_policy_step_id = step.id and + act.user_id = $1 + `), + + onCallEPRot: p.P(` + select distinct + svc.id, + svc.name, + ep.id, + ep.name, + step.step_number, + null, + null, + act.rotation_id, + rot.name, + part.id = state.rotation_participant_id + from services svc + join escalation_policies ep on ep.id = svc.escalation_policy_id + join escalation_policy_steps step on step.escalation_policy_id = svc.escalation_policy_id + join escalation_policy_actions act on + act.escalation_policy_step_id = step.id and + act.rotation_id notnull + join rotations rot on rot.id = act.rotation_id + join rotation_participants part on + part.user_id = $1 and + part.rotation_id = act.rotation_id + join rotation_state state on state.rotation_id = 
act.rotation_id + `), + + onCallRemoveOverrides: p.P(` + select + tgt_schedule_id + from user_overrides + where + now() between start_time and end_time and + remove_user_id = $1 + `), + + onCallAddOverrideAssignments: p.P(` + select + svc.id, + svc.name, + ep.id, + ep.name, + step.step_number, + sched.id, + sched.name, + null, + null, + o.start_time <= now() + from user_overrides o + join schedules sched on sched.id = o.tgt_schedule_id + join escalation_policy_actions act on act.schedule_id = o.tgt_schedule_id + join escalation_policy_steps step on step.id = act.escalation_policy_step_id + join escalation_policies ep on ep.id = step.escalation_policy_id + join services svc on svc.escalation_policy_id = step.escalation_policy_id + where + o.end_time > now() and + o.add_user_id = $1 and + o.remove_user_id isnull + `), + onCallReplaceOverrideAssignments: p.P(` + select distinct + svc.id, + svc.name, + ep.id, + ep.name, + step.step_number, + sched.id, + sched.name, + null, + null, + onCall.user_id notnull + from user_overrides o + join escalation_policy_actions act on act.schedule_id = o.tgt_schedule_id + join escalation_policy_steps step on step.id = act.escalation_policy_step_id + join escalation_policies ep on ep.id = step.escalation_policy_id + join services svc on svc.escalation_policy_id = step.escalation_policy_id + join schedules sched on sched.id = o.tgt_schedule_id + join schedule_rules rule on + rule.schedule_id = o.tgt_schedule_id and + (rule.tgt_user_id isnull or rule.tgt_user_id = o.remove_user_id) + left join schedule_on_call_users onCall on + onCall.user_id = $1 and + onCall.schedule_id = act.schedule_id and + onCall.end_time isnull + left join rotation_participants part on + part.rotation_id = rule.tgt_rotation_id and + part.user_id = o.remove_user_id + where + o.end_time > now() and + o.add_user_id = $1 and + o.remove_user_id notnull and + (rule.tgt_user_id notnull or part notnull) + `), + + onCallDirectAssignments: p.P(` + select distinct + svc.id, + 
svc.name, + ep.id, + ep.name, + step.step_number, + sched.id, + sched.name, + null, + null, + onCall.user_id notnull + from services svc + join escalation_policies ep on ep.id = svc.escalation_policy_id + join escalation_policy_steps step on step.escalation_policy_id = svc.escalation_policy_id + join escalation_policy_actions act on + act.escalation_policy_step_id = step.id and + act.schedule_id notnull + join schedule_rules rule on + rule.schedule_id = act.schedule_id and + (rule.tgt_user_id isnull or rule.tgt_user_id = $1) + join schedules sched on sched.id = act.schedule_id + left join schedule_on_call_users onCall on + onCall.user_id = $1 and + onCall.schedule_id = act.schedule_id and + onCall.end_time isnull + left join rotation_participants part on + part.user_id = $1 and + part.rotation_id = rule.tgt_rotation_id + where + rule.tgt_user_id notnull or + part notnull + `), + + epID: p.P(` + select escalation_policy_id + from alerts a + join services s + on s.id = a.service_id + where a.id = $1 + `), + }, p.Err +} + +func (db *DB) AlertEPID(ctx context.Context, alertID int) (string, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return "", err + } + + row := db.epID.QueryRowContext(ctx, alertID) + var epID string + return epID, errors.Wrap(row.Scan(&epID), "query alert EP ID") +} + +func (db *DB) IsUserOnCall(ctx context.Context, userID string) (bool, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return false, err + } + err = validate.UUID("UserID", userID) + if err != nil { + return false, err + } + var result int + err = db.isOnCall.QueryRowContext(ctx, userID).Scan(&result) + if err == sql.ErrNoRows { + return false, nil + } + if err != nil { + return false, err + } + return result == 1, nil +} + +type assignment struct { + ServiceID string + ServiceName string + EPID string + EPName string + StepNumber int + SchedID sql.NullString + SchedName sql.NullString + RotID sql.NullString + 
RotName sql.NullString + Active bool +} + +func (a assignment) ID() assignmentID { + return assignmentID{ + ServiceID: a.ServiceID, + EPID: a.EPID, + StepNumber: a.StepNumber, + SchedID: a.SchedID.String, + RotID: a.RotID.String, + } +} + +type assignmentID struct { + ServiceID string + EPID string + StepNumber int + SchedID string + RotID string +} + +func (db *DB) OnCallByUser(ctx context.Context, userID string) ([]OnCallAssignment, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + err = validate.UUID("UserID", userID) + if err != nil { + return nil, err + } + + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + defer tx.Rollback() + + fetchAll := func(s *sql.Stmt) ([]assignment, error) { + var result []assignment + rows, err := tx.Stmt(s).QueryContext(ctx, userID) + if err != nil { + return nil, err + } + defer rows.Close() + var f assignment + for rows.Next() { + err = rows.Scan(&f.ServiceID, &f.ServiceName, &f.EPID, &f.EPName, &f.StepNumber, &f.SchedID, &f.SchedName, &f.RotID, &f.RotName, &f.Active) + if err != nil { + return nil, err + } + result = append(result, f) + } + return result, nil + } + + direct, err := fetchAll(db.onCallDirectAssignments) + if err != nil { + return nil, errors.Wrap(err, "fetch direct") + } + replace, err := fetchAll(db.onCallReplaceOverrideAssignments) + if err != nil { + return nil, errors.Wrap(err, "fetch replace") + } + add, err := fetchAll(db.onCallAddOverrideAssignments) + if err != nil { + return nil, errors.Wrap(err, "fetch add") + } + userTgt, err := fetchAll(db.onCallEPDirect) + if err != nil { + return nil, errors.Wrap(err, "fetch ep direct") + } + rotTgt, err := fetchAll(db.onCallEPRot) + if err != nil { + return nil, errors.Wrap(err, "fetch ep rotations") + } + remove := make(map[string]bool) + rows, err := tx.Stmt(db.onCallRemoveOverrides).QueryContext(ctx, userID) + if err != nil { + return nil, err + } + defer rows.Close() + var id 
string + for rows.Next() { + err = rows.Scan(&id) + if err != nil { + return nil, err + } + remove[id] = true + } + // done with DB stuff + rows.Close() + tx.Rollback() + + m := make(map[assignmentID]assignment) + + all := append(direct, replace...) + all = append(all, add...) + all = append(all, userTgt...) + all = append(all, rotTgt...) + + for _, a := range all { + id := a.ID() + e := m[id] + a.Active = (e.Active || a.Active) && !remove[a.SchedID.String] + m[id] = a + } + + result := make([]OnCallAssignment, 0, len(m)) + + for _, a := range m { + result = append(result, OnCallAssignment{ + ServiceID: a.ServiceID, + ServiceName: a.ServiceName, + EPID: a.EPID, + EPName: a.EPName, + Level: a.StepNumber, + RotationID: a.RotID.String, + RotationName: a.RotName.String, + ScheduleID: a.SchedID.String, + ScheduleName: a.SchedName.String, + UserID: userID, + IsActive: a.Active, + }) + } + + return result, nil +} diff --git a/engine/resolver/state.go b/engine/resolver/state.go new file mode 100644 index 0000000000..e9301874f9 --- /dev/null +++ b/engine/resolver/state.go @@ -0,0 +1,8 @@ +package resolver + +import "time" + +type State struct { + Time time.Time + AlertID int +} diff --git a/engine/rotationmanager/advance.go b/engine/rotationmanager/advance.go new file mode 100644 index 0000000000..645ffb8ddb --- /dev/null +++ b/engine/rotationmanager/advance.go @@ -0,0 +1,60 @@ +package rotationmanager + +import ( + "github.com/target/goalert/schedule/rotation" + "time" +) + +type advance struct { + id string + t time.Time + p int +} + +// calcAdvance will calculate rotation advancement if it is required. 
If not, nil is returned +func calcAdvance(t time.Time, rot *rotation.Rotation, state rotation.State, partCount int) *advance { + + // get next shift start time + newStart := rot.EndTime(state.ShiftStart) + var mustUpdate bool + if state.Position >= partCount { + // deleted last participant + state.Position = 0 + mustUpdate = true + } + + if newStart.After(t) { + if mustUpdate { + return &advance{ + id: rot.ID, + t: state.ShiftStart, + p: state.Position, + } + } + // in the future, so nothing to do yet + return nil + } + + state.ShiftStart = newStart + + c := 0 + for { + c++ + if c > 10000 { + panic("too many rotation advances") + } + + state.Position = (state.Position + 1) % partCount + end := rot.EndTime(state.ShiftStart) + if end.After(t) { + break + } + state.ShiftStart = end + } + + return &advance{ + id: rot.ID, + t: state.ShiftStart, + p: state.Position, + } +} diff --git a/engine/rotationmanager/db.go b/engine/rotationmanager/db.go new file mode 100644 index 0000000000..0a9ae013c1 --- /dev/null +++ b/engine/rotationmanager/db.go @@ -0,0 +1,61 @@ +package rotationmanager + +import ( + "context" + "database/sql" + "github.com/target/goalert/engine/processinglock" + "github.com/target/goalert/util" +) + +// DB manages rotations in Postgres. +type DB struct { + lock *processinglock.Lock + + currentTime *sql.Stmt + + rotate *sql.Stmt + rotateData *sql.Stmt +} + +// Name returns the name of the module. +func (db *DB) Name() string { return "Engine.RotationManager" } + +// NewDB will create a new DB, preparing all statements necessary. 
+func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + lock, err := processinglock.NewLock(ctx, db, processinglock.Config{ + Type: processinglock.TypeRotation, + Version: 1, + }) + if err != nil { + return nil, err + } + p := &util.Prepare{Ctx: ctx, DB: db} + + return &DB{ + lock: lock, + + currentTime: p.P(`select now()`), + rotate: p.P(` + update rotation_state + set + shift_start = now(), + rotation_participant_id = (select id from rotation_participants where rotation_id = $1 and position = $2) + where rotation_id = $1 + `), + rotateData: p.P(` + select + rot.id, + rot."type", + rot.start_time, + rot.shift_length, + rot.time_zone, + state.shift_start, + state."position", + rot.participant_count + from rotations rot + join rotation_state state on state.rotation_id = rot.id + where $1 or state.rotation_id = $2 + for update skip locked + `), + }, p.Err +} diff --git a/engine/rotationmanager/update.go b/engine/rotationmanager/update.go new file mode 100644 index 0000000000..40c84f32fb --- /dev/null +++ b/engine/rotationmanager/update.go @@ -0,0 +1,127 @@ +package rotationmanager + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/util" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation/validate" + "time" + + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// UpdateAll will update and cleanup the rotation state for all rotations. +func (db *DB) UpdateAll(ctx context.Context) error { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + err = db.update(ctx, true, nil) + return err +} + +// UpdateOneRotation will update and cleanup the rotation state for the given rotation. 
+func (db *DB) UpdateOneRotation(ctx context.Context, rotID string) error { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + err = validate.UUID("Rotation", rotID) + if err != nil { + return err + } + ctx = log.WithField(ctx, "RotationID", rotID) + return db.update(ctx, false, &rotID) +} +func (db *DB) update(ctx context.Context, all bool, rotID *string) error { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + log.Debugf(ctx, "Updating rotations.") + + // process rotation advancement + tx, err := db.lock.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelRepeatableRead}) + if err != nil { + return errors.Wrap(err, "start advancement transaction") + } + defer tx.Rollback() + + needsAdvance, err := db.calcAdvances(ctx, tx, all, rotID) + if err != nil { + return errors.Wrap(err, "calc stale rotations") + } + + updateStmt := tx.Stmt(db.rotate) + for _, adv := range needsAdvance { + fctx := log.WithFields(ctx, log.Fields{ + "RotationID": adv.id, + "Position": adv.p, + }) + log.Debugf(fctx, "Advancing rotation.") + _, err = updateStmt.ExecContext(fctx, adv.id, adv.p) + if err != nil { + return errors.Wrap(err, "advance rotation") + } + } + + return errors.Wrap(tx.Commit(), "commit transaction") +} + +func (db *DB) calcAdvances(ctx context.Context, tx *sql.Tx, all bool, rotID *string) ([]advance, error) { + var t time.Time + err := tx.Stmt(db.currentTime).QueryRowContext(ctx).Scan(&t) + if err != nil { + return nil, errors.Wrap(err, "fetch current timestamp") + } + + rows, err := tx.Stmt(db.rotateData).QueryContext(ctx, all, rotID) + if err != nil { + return nil, errors.Wrap(err, "fetch current rotation state") + } + defer rows.Close() + + var rot rotation.Rotation + var state rotation.State + var partCount int + var tzName string + var adv *advance + var loc *time.Location + var needsAdvance []advance + + _, sp := trace.StartSpan(ctx, "Engine.RotationManager.ScanRows") + defer 
sp.End() + for rows.Next() { + err = rows.Scan( + &rot.ID, + &rot.Type, + &rot.Start, + &rot.ShiftLength, + &tzName, + &state.ShiftStart, + &state.Position, + &partCount, + ) + if err != nil { + return nil, errors.Wrap(err, "scan rotation data") + } + loc, err = util.LoadLocation(tzName) + if err != nil { + return nil, errors.Wrap(err, "load timezone") + } + rot.Start = rot.Start.In(loc) + adv = calcAdvance(t, &rot, state, partCount) + if adv != nil { + needsAdvance = append(needsAdvance, *adv) + if len(needsAdvance) == 150 { + // only process up to 150 at a time (of those that need updates) + break + } + } + } + return needsAdvance, nil +} diff --git a/engine/schedulemanager/db.go b/engine/schedulemanager/db.go new file mode 100644 index 0000000000..b07e187d38 --- /dev/null +++ b/engine/schedulemanager/db.go @@ -0,0 +1,91 @@ +package schedulemanager + +import ( + "context" + "database/sql" + "github.com/target/goalert/engine/processinglock" + "github.com/target/goalert/util" +) + +// DB will manage schedules and schedule rules in Postgres. +type DB struct { + lock *processinglock.Lock + + overrides *sql.Stmt + rules *sql.Stmt + currentTime *sql.Stmt + getOnCall *sql.Stmt + endOnCall *sql.Stmt + startOnCall *sql.Stmt +} + +// Name returns the name of the module. +func (db *DB) Name() string { return "Engine.ScheduleManager" } + +// NewDB will create a new DB instance, preparing all statements. 
+func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + lock, err := processinglock.NewLock(ctx, db, processinglock.Config{ + Type: processinglock.TypeSchedule, + Version: 2, + }) + if err != nil { + return nil, err + } + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + lock: lock, + + overrides: p.P(` + select + add_user_id, + remove_user_id, + tgt_schedule_id + from user_overrides + where now() between start_time and end_time + `), + rules: p.P(` + select + rule.schedule_id, + ARRAY[ + sunday, + monday, + tuesday, + wednesday, + thursday, + friday, + saturday + ], + start_time, + end_time, + sched.time_zone, + coalesce(rule.tgt_user_id, part.user_id) + from schedule_rules rule + join schedules sched on sched.id = rule.schedule_id + left join rotation_state rState on rState.rotation_id = rule.tgt_rotation_id + left join rotation_participants part on part.id = rState.rotation_participant_id + where + coalesce(rule.tgt_user_id, part.user_id) notnull + `), + getOnCall: p.P(` + select schedule_id, user_id + from schedule_on_call_users + where + end_time isnull + `), + startOnCall: p.P(` + insert into schedule_on_call_users (schedule_id, start_time, user_id) + values ($1, now(), $2) + `), + endOnCall: p.P(` + update schedule_on_call_users + set end_time = now() + where + schedule_id = $1 and + user_id = $2 and + end_time isnull + `), + + currentTime: p.P(`select now()`), + }, p.Err +} diff --git a/engine/schedulemanager/update.go b/engine/schedulemanager/update.go new file mode 100644 index 0000000000..477a5890b0 --- /dev/null +++ b/engine/schedulemanager/update.go @@ -0,0 +1,183 @@ +package schedulemanager + +import ( + "context" + "database/sql" + "github.com/target/goalert/assignment" + "github.com/target/goalert/override" + "github.com/target/goalert/permission" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/util" + "github.com/target/goalert/util/log" + "time" + + "github.com/lib/pq" + 
"github.com/pkg/errors" +) + +// UpdateAll will update all schedule rules. +func (db *DB) UpdateAll(ctx context.Context) error { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + err = db.update(ctx) + return err +} + +func (db *DB) update(ctx context.Context) error { + tx, err := db.lock.BeginTx(ctx, nil) + if err != nil { + return errors.Wrap(err, "start transaction") + } + defer tx.Rollback() + log.Debugf(ctx, "Updating schedule rules.") + + var now time.Time + err = tx.Stmt(db.currentTime).QueryRowContext(ctx).Scan(&now) + if err != nil { + return errors.Wrap(err, "get DB time") + } + + rows, err := tx.Stmt(db.overrides).QueryContext(ctx) + if err != nil { + return errors.Wrap(err, "get active overrides") + } + defer rows.Close() + + var overrides []override.UserOverride + for rows.Next() { + var o override.UserOverride + var schedTgt sql.NullString + var add, rem sql.NullString + err = rows.Scan(&add, &rem, &schedTgt) + if err != nil { + return errors.Wrap(err, "scan override") + } + o.AddUserID = add.String + o.RemoveUserID = rem.String + if !schedTgt.Valid { + continue + } + o.Target = assignment.ScheduleTarget(schedTgt.String) + overrides = append(overrides, o) + } + + rows, err = tx.Stmt(db.rules).QueryContext(ctx) + if err != nil { + return errors.Wrap(err, "get rules") + } + defer rows.Close() + + type userRule struct { + rule.Rule + UserID string + } + + var rules []userRule + var tzName string + tz := make(map[string]*time.Location) + for rows.Next() { + var r userRule + filter := make(pq.BoolArray, 7) + err = rows.Scan( + &r.ScheduleID, + &filter, + &r.Start, + &r.End, + &tzName, + &r.UserID, + ) + if err != nil { + return errors.Wrap(err, "scan rule") + } + for i, v := range filter { + r.SetDay(time.Weekday(i), v) + } + if tz[r.ScheduleID] == nil { + tz[r.ScheduleID], err = util.LoadLocation(tzName) + if err != nil { + return errors.Wrap(err, "load timezone") + } + } + + rules = append(rules, r) + } + 
rows, err = tx.Stmt(db.getOnCall).QueryContext(ctx) + if err != nil { + return errors.Wrap(err, "get on call") + } + defer rows.Close() + + type onCall struct { + UserID string + ScheduleID string + } + oldOnCall := make(map[onCall]bool) + var oc onCall + for rows.Next() { + err = rows.Scan(&oc.ScheduleID, &oc.UserID) + if err != nil { + return errors.Wrap(err, "scan on call user") + } + oldOnCall[oc] = true + } + + newOnCall := make(map[onCall]bool, len(rules)) + for _, r := range rules { + if r.IsActive(now.In(tz[r.ScheduleID])) { + newOnCall[onCall{ScheduleID: r.ScheduleID, UserID: r.UserID}] = true + } + } + + for _, o := range overrides { + if o.AddUserID != "" && o.RemoveUserID == "" { + // ADD override + newOnCall[onCall{ScheduleID: o.Target.TargetID(), UserID: o.AddUserID}] = true + continue + } + if o.AddUserID == "" && o.RemoveUserID != "" { + // REMOVE override + delete(newOnCall, onCall{ScheduleID: o.Target.TargetID(), UserID: o.RemoveUserID}) + continue + } + + if newOnCall[onCall{ScheduleID: o.Target.TargetID(), UserID: o.RemoveUserID}] { + // REPLACE override + delete(newOnCall, onCall{ScheduleID: o.Target.TargetID(), UserID: o.RemoveUserID}) + newOnCall[onCall{ScheduleID: o.Target.TargetID(), UserID: o.AddUserID}] = true + } + } + + start := tx.Stmt(db.startOnCall) + + for oc := range newOnCall { + // not on call in DB, but are now + if !oldOnCall[oc] { + _, err = start.ExecContext(ctx, oc.ScheduleID, oc.UserID) + if err != nil && !isScheduleDeleted(err) { + return errors.Wrap(err, "record shift start") + } + } + } + end := tx.Stmt(db.endOnCall) + for oc := range oldOnCall { + // on call in DB, but no longer + if !newOnCall[oc] { + _, err = end.ExecContext(ctx, oc.ScheduleID, oc.UserID) + if err != nil { + return errors.Wrap(err, "record shift end") + } + } + } + + return tx.Commit() +} + +func isScheduleDeleted(err error) bool { + dbErr, ok := err.(*pq.Error) + if !ok { + return false + } + return dbErr.Constraint == 
"schedule_on_call_users_schedule_id_fkey" +} diff --git a/engine/sendmessage.go b/engine/sendmessage.go new file mode 100644 index 0000000000..ccf461bada --- /dev/null +++ b/engine/sendmessage.go @@ -0,0 +1,119 @@ +package engine + +import ( + "context" + alertlog "github.com/target/goalert/alert/log" + "github.com/target/goalert/notification" + "github.com/target/goalert/permission" + "github.com/target/goalert/util/log" + + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +func (p *Engine) sendMessage(ctx context.Context, msgID string, destType notification.DestType, destID string, disabledOK bool, fn func(notification.Dest) notification.Message, afterFn func(context.Context)) (*notification.MessageStatus, error) { + ctx, sp := trace.StartSpan(ctx, "Engine.SendMessage") + defer sp.End() + sp.AddAttributes( + trace.StringAttribute("message.id", msgID), + trace.StringAttribute("dest.type", destType.String()), + trace.StringAttribute("dest.id", destID), + ) + + var dest notification.Dest + if destType.IsUserCM() { + cm, err := p.cfg.ContactMethodStore.FindOne(ctx, destID) + if err != nil { + return nil, errors.Wrap(err, "lookup contact method") + } + trace.FromContext(ctx).AddAttributes(trace.StringAttribute("message.contactMethod.value", cm.Value)) + + if !disabledOK && cm.Disabled { + return nil, errDisabledCM + } + dest.Type = cm.Type.DestType() + dest.Value = cm.Value + ctx = permission.UserSourceContext(ctx, cm.UserID, permission.RoleUser, &permission.SourceInfo{ + Type: permission.SourceTypeContactMethod, + ID: cm.ID, + }) + } else { + ch, err := p.cfg.NCStore.FindOne(ctx, destID) + if err != nil { + return nil, errors.Wrap(err, "lookup notification channel") + } + dest.Type = ch.Type.DestType() + dest.Value = ch.Value + ctx = permission.SourceContext(ctx, &permission.SourceInfo{ + Type: permission.SourceTypeNotificationChannel, + ID: ch.ID, + }) + } + + msg := fn(dest) + + ctx = log.WithField(ctx, "CallbackID", msgID) + + status, err := 
p.cfg.NotificationSender.Send(ctx, msg) + if err != nil { + return nil, err + } + if afterFn != nil { + afterFn(ctx) + } + return status, nil +} + +func (p *Engine) sendStatusUpdate(ctx context.Context, msgID string, alertLogID int, destType notification.DestType, destID string) (*notification.MessageStatus, error) { + e, err := p.cfg.AlertlogStore.FindOne(ctx, alertLogID) + if err != nil { + return nil, errors.Wrap(err, "lookup alert log entry") + } + + return p.sendMessage(ctx, msgID, destType, destID, false, func(dest notification.Dest) notification.Message { + return notification.AlertStatus{ + Dest: dest, + AlertID: e.AlertID(), + MessageID: msgID, + Log: e.String(), + } + }, nil) +} +func (p *Engine) sendNotification(ctx context.Context, msgID string, alertID int, destType notification.DestType, destID string) (*notification.MessageStatus, error) { + a, err := p.am.FindOne(ctx, alertID) + if err != nil { + return nil, errors.Wrap(err, "lookup alert") + } + return p.sendMessage(ctx, msgID, destType, destID, false, func(dest notification.Dest) notification.Message { + return notification.Alert{ + Dest: dest, + AlertID: a.ID, + Summary: a.Summary, + Details: a.Details, + CallbackID: msgID, + } + }, func(ctx context.Context) { + p.cfg.AlertlogStore.MustLog(ctx, alertID, alertlog.TypeNotificationSent, nil) + }) +} +func (p *Engine) sendTestNotification(ctx context.Context, msgID string, destType notification.DestType, destID string) (*notification.MessageStatus, error) { + return p.sendMessage(ctx, msgID, destType, destID, false, func(dest notification.Dest) notification.Message { + return notification.Test{ + Dest: dest, + CallbackID: msgID, + } + }, nil) +} +func (p *Engine) sendVerificationMessage(ctx context.Context, msgID string, destType notification.DestType, destID, verifyID string) (*notification.MessageStatus, error) { + code, err := p.cfg.NotificationStore.Code(ctx, verifyID) + if err != nil { + return nil, errors.Wrap(err, "lookup verification code") + 
} + return p.sendMessage(ctx, msgID, destType, destID, true, func(dest notification.Dest) notification.Message { + return notification.Verification{ + Dest: dest, + CallbackID: msgID, + Code: code, + } + }, nil) +} diff --git a/engine/statusupdatemanager/db.go b/engine/statusupdatemanager/db.go new file mode 100644 index 0000000000..8d3d71f94f --- /dev/null +++ b/engine/statusupdatemanager/db.go @@ -0,0 +1,93 @@ +package statusupdatemanager + +import ( + "context" + "database/sql" + "github.com/target/goalert/engine/processinglock" + "github.com/target/goalert/util" +) + +// DB manages outgoing status updates. +type DB struct { + lock *processinglock.Lock + + insertMessages *sql.Stmt +} + +// Name returns the name of the module. +func (db *DB) Name() string { return "Engine.StatusUpdateManager" } + +// NewDB creates a new DB. +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + lock, err := processinglock.NewLock(ctx, db, processinglock.Config{ + Type: processinglock.TypeStatusUpdate, + Version: 1, + }) + if err != nil { + return nil, err + } + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + lock: lock, + + insertMessages: p.P(` + with rows as ( + select + log.id, + log.alert_id, + usr.alert_status_log_contact_method_id, + last.user_id, + log.event = 'closed' is_closed, + coalesce(last.user_id = log.sub_user_id, false) is_same_user + from user_last_alert_log last + join users usr on + usr.id = last.user_id + join alert_logs log ON + last.alert_id = log.alert_id AND + log.id BETWEEN last.log_id+1 AND last.next_log_id AND + log.event IN ('acknowledged', 'closed') + where last.log_id != last.next_log_id + limit 100 + for update skip locked + ), inserted as ( + insert into outgoing_messages ( + message_type, + alert_log_id, + alert_id, + contact_method_id, + user_id + ) + select + 'alert_status_update', + id, + alert_id, + alert_status_log_contact_method_id, + user_id + from rows + where + alert_status_log_contact_method_id notnull and + not 
is_same_user + ), any_closed as ( + select + bool_or(is_closed) is_closed, user_id, alert_id + from rows + group by user_id, alert_id + ), updated as ( + update user_last_alert_log log + set log_id = next_log_id + from any_closed c + where + not c.is_closed and + log.user_id = c.user_id and + log.alert_id = c.alert_id + ) + delete from user_last_alert_log log + using any_closed c + where + c.is_closed and + log.user_id = c.user_id and + log.alert_id = c.alert_id + `), + }, p.Err +} diff --git a/engine/statusupdatemanager/update.go b/engine/statusupdatemanager/update.go new file mode 100644 index 0000000000..6ddea706ac --- /dev/null +++ b/engine/statusupdatemanager/update.go @@ -0,0 +1,30 @@ +package statusupdatemanager + +import ( + "context" + "github.com/target/goalert/permission" + "github.com/target/goalert/util/log" + + "github.com/pkg/errors" +) + +// UpdateAll will update all schedule rules. +func (db *DB) UpdateAll(ctx context.Context) error { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + err = db.update(ctx, true, nil) + return err +} + +func (db *DB) update(ctx context.Context, all bool, alertID *int) error { + log.Debugf(ctx, "Processing status updates.") + + _, err := db.lock.Exec(ctx, db.insertMessages) + if err != nil { + return errors.Wrap(err, "insert status update messages") + } + + return nil +} diff --git a/engine/verifymanager/db.go b/engine/verifymanager/db.go new file mode 100644 index 0000000000..b71ef50558 --- /dev/null +++ b/engine/verifymanager/db.go @@ -0,0 +1,63 @@ +package verifymanager + +import ( + "context" + "database/sql" + "github.com/target/goalert/engine/processinglock" + "github.com/target/goalert/util" +) + +// DB will manage verification codes. +type DB struct { + lock *processinglock.Lock + + insertMessages *sql.Stmt + cleanupExpired *sql.Stmt +} + +// Name returns the name of the module. 
+func (db *DB) Name() string { return "Engine.VerificationManager" } + +// NewDB will create a new DB instance, preparing all statements. +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + lock, err := processinglock.NewLock(ctx, db, processinglock.Config{ + Type: processinglock.TypeVerify, + Version: 1, + }) + if err != nil { + return nil, err + } + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + lock: lock, + insertMessages: p.P(` + with rows as ( + insert into outgoing_messages (message_type, contact_method_id, user_id, user_verification_code_id) + select 'verification_message', send_to, user_id, code.id + from user_verification_codes code + where send_to notnull and now() < expires_at + limit 100 + for update skip locked + returning user_verification_code_id id + ) + update user_verification_codes code + set send_to = null + from rows + where code.id = rows.id + `), + + cleanupExpired: p.P(` + with rows as ( + select id + from user_verification_codes + where now() >= expires_at + limit 100 + for update skip locked + ) + delete from user_verification_codes code + using rows + where code.id = rows.id + `), + }, p.Err +} diff --git a/engine/verifymanager/update.go b/engine/verifymanager/update.go new file mode 100644 index 0000000000..874fda6eef --- /dev/null +++ b/engine/verifymanager/update.go @@ -0,0 +1,42 @@ +package verifymanager + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/util/log" + + "github.com/pkg/errors" +) + +// UpdateAll will insert all verification requests into outgoing_messages. 
func (db *DB) UpdateAll(ctx context.Context) error {
	// Requires System permission.
	err := permission.LimitCheckAny(ctx, permission.System)
	if err != nil {
		return err
	}
	err = db.update(ctx)
	return err
}

// update runs both prepared statements under the engine processing lock:
// expired codes are cleaned up first, then messages are queued for the rest.
func (db *DB) update(ctx context.Context) error {
	log.Debugf(ctx, "Processing verification messages.")
	var err error
	// exec runs s under the lock; on failure it stores the wrapped error in
	// err (captured by the closure) and returns false so callers stop early.
	exec := func(s *sql.Stmt, msg string) bool {
		_, err = db.lock.Exec(ctx, s)
		if err != nil {
			err = errors.Wrap(err, msg)
			return false
		}
		return true
	}

	if !exec(db.cleanupExpired, "cleanup expired codes") {
		return err
	}

	if !exec(db.insertMessages, "insert messages") {
		return err
	}

	return nil
}
diff --git a/escalation/policy.go b/escalation/policy.go
new file mode 100644
index 0000000000..2098cf0d2d
--- /dev/null
+++ b/escalation/policy.go
@@ -0,0 +1,25 @@
package escalation

import (
	"github.com/target/goalert/validation/validate"
)

// Policy represents an escalation policy record.
type Policy struct {
	ID          string `json:"id"`
	Name        string `json:"name"`
	Description string `json:"description"`
	Repeat      int    `json:"repeat"` // validated to the range 0-5 in Normalize
}

// Normalize validates the policy's fields, returning a copy on success.
func (p Policy) Normalize() (*Policy, error) {
	err := validate.Many(
		validate.IDName("Name", p.Name),
		validate.Text("Description", p.Description, 1, 255),
		validate.Range("Repeat", p.Repeat, 0, 5),
	)
	if err != nil {
		return nil, err
	}

	return &p, nil
}
diff --git a/escalation/policy_test.go b/escalation/policy_test.go
new file mode 100644
index 0000000000..6a7380ea51
--- /dev/null
+++ b/escalation/policy_test.go
@@ -0,0 +1,36 @@
package escalation

import (
	"testing"
)

// TestPolicy_Normalize checks that Normalize accepts a valid policy and
// rejects an invalid one (negative Repeat).
func TestPolicy_Normalize(t *testing.T) {
	// test runs Normalize on p and expects an error iff valid is false.
	test := func(valid bool, p Policy) {
		name := "valid"
		if !valid {
			name = "invalid"
		}
		t.Run(name, func(t *testing.T) {
			t.Logf("%+v", p)
			_, err := p.Normalize()
			if valid && err != nil {
				t.Errorf("got %v; want nil", err)
			} else if !valid && err == nil {
				t.Errorf("got nil err; want non-nil")
			}
		})
	}

	valid := []Policy{
		{Name: "SampleEscPolicy", Description: "Sample Escalation Policy", Repeat: 1},
	}
	invalid := []Policy{
		{Name: "SampleEscPolicy", Description: "Sample Escalation Policy", Repeat: -5},
	}
	for _, p := range valid {
		test(true, p)
	}
	for _, p := range invalid {
		test(false, p)
	}
}
diff --git a/escalation/search.go b/escalation/search.go
new file mode 100644
index 0000000000..b37a56e9e8
--- /dev/null
+++ b/escalation/search.go
@@ -0,0 +1,121 @@
package escalation

import (
	"context"
	"database/sql"
	"github.com/target/goalert/permission"
	"github.com/target/goalert/search"
	"github.com/target/goalert/validation/validate"
	"text/template"

	"github.com/lib/pq"
	"github.com/pkg/errors"
)

// SearchOptions allow filtering and paginating the list of escalation policies.
type SearchOptions struct {
	Search string       `json:"s,omitempty"` // case-insensitive match against name or description (ILIKE)
	After  SearchCursor `json:"a,omitempty"` // cursor: results start after this policy name

	// Omit specifies a list of policy IDs to exclude from the results.
	Omit []string `json:"o,omitempty"`

	Limit int `json:"-"` // 0 means search.DefaultMaxResults
}

// SearchCursor is used to indicate a position in a paginated list.
+type SearchCursor struct { + Name string `json:"n,omitempty"` +} + +var searchTemplate = template.Must(template.New("search").Parse(` + SELECT + id, name, description, repeat + FROM escalation_policies pol + WHERE true + {{if .Omit}} + AND not id = any(:omit) + {{end}} + {{if .SearchStr}} + AND (pol.name ILIKE :search OR pol.description ILIKE :search) + {{end}} + {{if .After.Name}} + AND lower(pol.name) > lower(:afterName) + {{end}} + ORDER BY lower(pol.name) + LIMIT {{.Limit}} +`)) + +type renderData SearchOptions + +func (opts renderData) SearchStr() string { + if opts.Search == "" { + return "" + } + + return "%" + search.Escape(opts.Search) + "%" +} + +func (opts renderData) Normalize() (*renderData, error) { + if opts.Limit == 0 { + opts.Limit = search.DefaultMaxResults + } + + err := validate.Many( + validate.Text("Search", opts.Search, 0, search.MaxQueryLen), + validate.Range("Limit", opts.Limit, 0, search.MaxResults), + validate.ManyUUID("Omit", opts.Omit, 50), + ) + if opts.After.Name != "" { + err = validate.Many(err, validate.IDName("After.Name", opts.After.Name)) + } + + return &opts, err +} + +func (opts renderData) QueryArgs() []sql.NamedArg { + return []sql.NamedArg{ + sql.Named("search", opts.SearchStr()), + sql.Named("afterName", opts.After.Name), + sql.Named("omit", pq.StringArray(opts.Omit)), + } +} + +func (db *DB) Search(ctx context.Context, opts *SearchOptions) ([]Policy, error) { + err := permission.LimitCheckAny(ctx, permission.User) + if err != nil { + return nil, err + } + if opts == nil { + opts = &SearchOptions{} + } + data, err := (*renderData)(opts).Normalize() + if err != nil { + return nil, err + } + query, args, err := search.RenderQuery(ctx, searchTemplate, data) + if err != nil { + return nil, errors.Wrap(err, "render query") + } + + rows, err := db.db.QueryContext(ctx, query, args...) 
+ if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Policy + var p Policy + for rows.Next() { + err = rows.Scan(&p.ID, &p.Name, &p.Description, &p.Repeat) + if err != nil { + return nil, err + } + result = append(result, p) + } + + return result, nil +} diff --git a/escalation/step.go b/escalation/step.go new file mode 100644 index 0000000000..bc7f19c630 --- /dev/null +++ b/escalation/step.go @@ -0,0 +1,41 @@ +package escalation + +import ( + "github.com/target/goalert/assignment" + "github.com/target/goalert/validation/validate" + "time" +) + +type ActiveStep struct { + StepID string + PolicyID string + AlertID int + LastEscalation time.Time + LoopCount int + ForceEscalation bool + StepNumber int +} + +type Step struct { + ID string `json:"id"` + PolicyID string `json:"escalation_policy_id"` + DelayMinutes int `json:"delay_minutes"` + StepNumber int `json:"step_number"` + + Targets []assignment.Target +} + +func (s Step) Delay() time.Duration { + return time.Duration(s.DelayMinutes) * time.Minute +} +func (s Step) Normalize() (*Step, error) { + err := validate.Many( + validate.UUID("PolicyID", s.PolicyID), + validate.Range("DelayMinutes", s.DelayMinutes, 1, 9000), + ) + if err != nil { + return nil, err + } + + return &s, nil +} diff --git a/escalation/step_test.go b/escalation/step_test.go new file mode 100644 index 0000000000..4dde5e9264 --- /dev/null +++ b/escalation/step_test.go @@ -0,0 +1,37 @@ +package escalation + +import ( + "testing" +) + +func TestStep_Normalize(t *testing.T) { + test := func(valid bool, s Step) { + name := "valid" + if !valid { + name = "invalid" + } + t.Run(name, func(t *testing.T) { + t.Logf("%+v", s) + _, err := s.Normalize() + if valid && err != nil { + t.Errorf("got %v; want nil", err) + } else if !valid && err == nil { + t.Errorf("got nil err; want non-nil") + } + }) + } + + valid := []Step{ + {PolicyID: "a81facc0-4764-012d-7bfb-002500d5d678", 
DelayMinutes: 1}, + } + + invalid := []Step{ + {PolicyID: "a81facc0-4764-012d-7bfb-002500d5d678", DelayMinutes: 9001}, + } + for _, s := range valid { + test(true, s) + } + for _, s := range invalid { + test(false, s) + } +} diff --git a/escalation/store.go b/escalation/store.go new file mode 100644 index 0000000000..7a13802afb --- /dev/null +++ b/escalation/store.go @@ -0,0 +1,975 @@ +package escalation + +import ( + "context" + "database/sql" + + alertlog "github.com/target/goalert/alert/log" + "github.com/target/goalert/assignment" + "github.com/target/goalert/notification/slack" + "github.com/target/goalert/notificationchannel" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation/validate" + + "github.com/lib/pq" + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" +) + +type Store interface { + PolicyStore + StepStore + ActiveStepReader +} + +type PolicyStore interface { + FindOnePolicy(context.Context, string) (*Policy, error) + FindOnePolicyTx(context.Context, *sql.Tx, string) (*Policy, error) + FindOnePolicyForUpdateTx(context.Context, *sql.Tx, string) (*Policy, error) + FindAllPolicies(context.Context) ([]Policy, error) + CreatePolicy(context.Context, *Policy) (*Policy, error) + CreatePolicyTx(context.Context, *sql.Tx, *Policy) (*Policy, error) + UpdatePolicy(context.Context, *Policy) error + UpdatePolicyTx(context.Context, *sql.Tx, *Policy) error + DeletePolicy(ctx context.Context, id string) error + DeletePolicyTx(ctx context.Context, tx *sql.Tx, id string) error + FindAllStepTargets(ctx context.Context, stepID string) ([]assignment.Target, error) + FindAllStepTargetsTx(ctx context.Context, tx *sql.Tx, stepID string) ([]assignment.Target, error) + AddStepTarget(ctx context.Context, stepID string, tgt assignment.Target) error + AddStepTargetTx(ctx context.Context, tx *sql.Tx, stepID string, tgt assignment.Target) 
error + DeleteStepTarget(ctx context.Context, stepID string, tgt assignment.Target) error + DeleteStepTargetTx(ctx context.Context, tx *sql.Tx, stepID string, tgt assignment.Target) error + FindAllPoliciesBySchedule(ctx context.Context, scheduleID string) ([]Policy, error) + FindManyPolicies(ctx context.Context, ids []string) ([]Policy, error) + DeleteManyPoliciesTx(ctx context.Context, tx *sql.Tx, ids []string) error + + Search(context.Context, *SearchOptions) ([]Policy, error) +} + +type StepStore interface { + // CreateStep is replaced by CreateStepTx. + CreateStep(context.Context, *Step) (*Step, error) + + // CreateStepTx will create an escalation policy step within the given transaction. + // Note: Targets are not assigned during creation. + CreateStepTx(context.Context, *sql.Tx, *Step) (*Step, error) + UpdateStepNumberTx(context.Context, *sql.Tx, string, int) error + + // Update step allows updating a steps delay + // Note: it does not update the Targets. + UpdateStep(context.Context, *Step) error + UpdateStepDelayTx(context.Context, *sql.Tx, string, int) error + DeleteStep(context.Context, string) (string, error) + DeleteStepTx(context.Context, *sql.Tx, string) (string, error) + MoveStep(context.Context, string, int) error +} + +type ActiveStepReader interface { + ActiveStep(ctx context.Context, alertID int, policyID string) (*ActiveStep, error) + + // FindOneStep will return a single escalation policy step. + // Note: it does not currently fetch the Targets. + FindOneStep(context.Context, string) (*Step, error) + FindOneStepTx(context.Context, *sql.Tx, string) (*Step, error) + FindOneStepForUpdateTx(context.Context, *sql.Tx, string) (*Step, error) + + // FindAllSteps will return escalation policy steps for the given policy ID. + // Note: it does not currently fetch the Targets. 
+ FindAllSteps(context.Context, string) ([]Step, error) + FindAllStepsTx(context.Context, *sql.Tx, string) ([]Step, error) + FindAllOnCallStepsForUserTx(ctx context.Context, tx *sql.Tx, userID string) ([]Step, error) +} + +type Manager interface { + ActiveStepReader + + FindOnePolicy(context.Context, string) (*Policy, error) +} + +var _ Manager = &DB{} +var _ Store = &DB{} + +type Config struct { + NCStore notificationchannel.Store + LogStore alertlog.Store + SlackLookupFunc func(ctx context.Context, channelID string) (*slack.Channel, error) +} + +type DB struct { + db *sql.DB + + log alertlog.Store + ncStore notificationchannel.Store + slackFn func(ctx context.Context, channelID string) (*slack.Channel, error) + + findSlackChan *sql.Stmt + + findOnePolicy *sql.Stmt + findOnePolicyForUpdate *sql.Stmt + findManyPolicies *sql.Stmt + findAllPolicies *sql.Stmt + findAllPoliciesBySchedule *sql.Stmt + createPolicy *sql.Stmt + updatePolicy *sql.Stmt + deletePolicy *sql.Stmt + + findOneStep *sql.Stmt + findOneStepForUpdate *sql.Stmt + findAllSteps *sql.Stmt + findAllOnCallSteps *sql.Stmt + createStep *sql.Stmt + updateStepDelay *sql.Stmt + updateStepNumber *sql.Stmt + deleteStep *sql.Stmt + moveStep *sql.Stmt + + activeStep *sql.Stmt + + addStepTarget *sql.Stmt + deleteStepTarget *sql.Stmt + findAllStepTargets *sql.Stmt +} + +func NewDB(ctx context.Context, db *sql.DB, cfg Config) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + db: db, + log: cfg.LogStore, + slackFn: cfg.SlackLookupFunc, + ncStore: cfg.NCStore, + + findSlackChan: p.P(` + SELECT chan.id + FROM notification_channels chan + JOIN escalation_policy_actions act ON + act.escalation_policy_step_id = $1 AND + act.channel_id = chan.id + WHERE chan.value = $2 + `), + + findOnePolicy: p.P(`SELECT id, name, description, repeat FROM escalation_policies WHERE id = $1`), + findOnePolicyForUpdate: p.P(`SELECT id, name, description, repeat FROM escalation_policies WHERE id = $1 FOR UPDATE`), + 
findManyPolicies: p.P(`SELECT id, name, description, repeat FROM escalation_policies WHERE id = any($1)`), + + findAllPolicies: p.P(`SELECT id, name, description, repeat FROM escalation_policies`), + findAllPoliciesBySchedule: p.P(` + SELECT DISTINCT + step.escalation_policy_id, + pol.name, + pol.description, + pol.repeat + FROM + escalation_policy_actions as act + JOIN + escalation_policy_steps as step on step.id = act.escalation_policy_step_id + JOIN + escalation_policies as pol on pol.id = step.escalation_policy_id + WHERE + act.schedule_id = $1 + `), + createPolicy: p.P(`INSERT INTO escalation_policies (id, name, description, repeat) VALUES ($1, $2, $3, $4)`), + updatePolicy: p.P(`UPDATE escalation_policies SET name = $2, description = $3, repeat = $4 WHERE id = $1`), + deletePolicy: p.P(`DELETE FROM escalation_policies WHERE id = any($1)`), + + addStepTarget: p.P(` + INSERT INTO escalation_policy_actions (escalation_policy_step_id, user_id, schedule_id, rotation_id, channel_id) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT DO NOTHING + `), + deleteStepTarget: p.P(` + DELETE FROM escalation_policy_actions + WHERE + escalation_policy_step_id = $1 AND + ( + user_id = $2 OR + schedule_id = $3 OR + rotation_id = $4 OR + channel_id = $5 + ) + `), + findAllStepTargets: p.P(` + SELECT + user_id, + schedule_id, + rotation_id, + channel_id, + chan.type, + chan.value, + COALESCE(users.name, rot.name, sched.name, chan.name) + FROM + escalation_policy_actions act + LEFT JOIN users + on act.user_id = users.id + LEFT JOIN rotations rot + on act.rotation_id = rot.id + LEFT JOIN schedules sched + on act.schedule_id = sched.id + LEFT JOIN notification_channels chan + on act.channel_id = chan.id + WHERE + escalation_policy_step_id = $1 + `), + + findOneStep: p.P(`SELECT id, escalation_policy_id, delay, step_number FROM escalation_policy_steps WHERE id = $1`), + findOneStepForUpdate: p.P(`SELECT id, escalation_policy_id, delay, step_number FROM escalation_policy_steps WHERE id = $1 
FOR UPDATE`), + findAllSteps: p.P(`SELECT id, escalation_policy_id, delay, step_number FROM escalation_policy_steps WHERE escalation_policy_id = $1 ORDER BY step_number`), + findAllOnCallSteps: p.P(` + SELECT step.id, step.escalation_policy_id, step.delay, step.step_number + FROM ep_step_on_call_users oc + JOIN escalation_policy_steps step ON step.id = oc.ep_step_id + WHERE oc.user_id = $1 AND oc.end_time isnull + ORDER BY step.escalation_policy_id, step.step_number + `), + + createStep: p.P(` + INSERT INTO escalation_policy_steps + (id, escalation_policy_id, delay, step_number) + VALUES ($1, $2, $3, DEFAULT) + RETURNING step_number + `), + updateStepDelay: p.P(`UPDATE escalation_policy_steps SET delay = $2 WHERE id = $1`), + updateStepNumber: p.P(`UPDATE escalation_policy_steps SET step_number = $2 WHERE id = $1`), + deleteStep: p.P(`DELETE FROM escalation_policy_steps WHERE id = $1 RETURNING escalation_policy_id`), + moveStep: p.P(` + WITH calc AS ( + SELECT + escalation_policy_id esc_id, + step_number old_pos, + LEAST(step_number, $2) min, + GREATEST(step_number, $2) max, + ($2 - step_number) diff, + CASE + WHEN step_number < $2 THEN abs($2-step_number) + WHEN step_number > $2 THEN 1 + ELSE 0 + END shift + FROM escalation_policy_steps + WHERE id = $1 + FOR UPDATE + ) + UPDATE escalation_policy_steps + SET step_number = ((step_number - calc.min) + calc.shift) % (abs(calc.diff) + 1) + calc.min + FROM calc + WHERE + escalation_policy_id = calc.esc_id AND + step_number >= calc.min AND + step_number <= calc.max + RETURNING escalation_policy_id + `), + + activeStep: p.P(` + SELECT + escalation_policy_step_id, + last_escalation, + loop_count, + force_escalation, + escalation_policy_step_number + FROM escalation_policy_state + WHERE alert_id = $1 AND escalation_policy_id = $2 + `), + }, p.Err +} + +func (db *DB) logChange(ctx context.Context, tx *sql.Tx, policyID string) { + err := db.log.LogEPTx(ctx, tx, policyID, alertlog.TypePolicyUpdated, nil) + if err != nil { + 
log.Log(ctx, errors.Wrap(err, "append alertlog (escalation policy update)")) + } +} + +func validStepTarget(tgt assignment.Target) error { + return validate.Many( + validate.UUID("TargetID", tgt.TargetID()), + validate.OneOf("TargetType", tgt.TargetType(), + assignment.TargetTypeUser, + assignment.TargetTypeSchedule, + assignment.TargetTypeRotation, + assignment.TargetTypeNotificationChannel, + ), + ) +} + +func tgtFields(id string, tgt assignment.Target) []interface{} { + var usr, sched, rot, ch sql.NullString + switch tgt.TargetType() { + case assignment.TargetTypeUser: + usr.Valid = true + usr.String = tgt.TargetID() + case assignment.TargetTypeSchedule: + sched.Valid = true + sched.String = tgt.TargetID() + case assignment.TargetTypeRotation: + rot.Valid = true + rot.String = tgt.TargetID() + case assignment.TargetTypeNotificationChannel: + ch.Valid = true + ch.String = tgt.TargetID() + } + return []interface{}{ + id, + usr, + sched, + rot, + ch, + } +} + +func (db *DB) FindManyPolicies(ctx context.Context, ids []string) ([]Policy, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + err = validate.ManyUUID("EscalationPolicyID", ids, 200) + if err != nil { + return nil, err + } + + rows, err := db.findManyPolicies.QueryContext(ctx, pq.StringArray(ids)) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Policy + var p Policy + for rows.Next() { + err = rows.Scan(&p.ID, &p.Name, &p.Description, &p.Repeat) + if err != nil { + return nil, err + } + result = append(result, p) + } + + return result, nil +} + +func (db *DB) _updateStepTarget(ctx context.Context, stepID string, tgt assignment.Target, stmt *sql.Stmt) error { + err := validate.Many( + validate.UUID("StepID", stepID), + validStepTarget(tgt), + ) + if err != nil { + return err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return err + } + _, err 
= stmt.ExecContext(ctx, tgtFields(stepID, tgt)...) + if err == sql.ErrNoRows { + err = nil + } + return err +} + +func (db *DB) newSlackChannel(ctx context.Context, tx *sql.Tx, slackChanID string) (assignment.Target, error) { + ch, err := db.slackFn(ctx, slackChanID) + if err != nil { + return nil, err + } + + notifCh, err := db.ncStore.CreateTx(ctx, tx, ¬ificationchannel.Channel{ + Type: notificationchannel.TypeSlack, + Name: ch.Name, + Value: ch.ID, + }) + if err != nil { + return nil, err + } + + return assignment.NotificationChannelTarget(notifCh.ID), nil +} +func (db *DB) lookupSlackChannel(ctx context.Context, tx *sql.Tx, stepID, slackChanID string) (assignment.Target, error) { + var notifChanID string + err := tx.StmtContext(ctx, db.findSlackChan).QueryRowContext(ctx, stepID, slackChanID).Scan(¬ifChanID) + if err != nil { + return nil, err + } + + return assignment.NotificationChannelTarget(notifChanID), nil +} + +func (db *DB) AddStepTarget(ctx context.Context, stepID string, tgt assignment.Target) error { + return db._updateStepTarget(ctx, stepID, tgt, db.addStepTarget) +} + +func (db *DB) AddStepTargetTx(ctx context.Context, tx *sql.Tx, stepID string, tgt assignment.Target) error { + if tgt.TargetType() == assignment.TargetTypeSlackChannel { + var err error + tgt, err = db.newSlackChannel(ctx, tx, tgt.TargetID()) + if err != nil { + return err + } + } + return db._updateStepTarget(ctx, stepID, tgt, tx.StmtContext(ctx, db.addStepTarget)) +} + +func (db *DB) DeleteStepTarget(ctx context.Context, stepID string, tgt assignment.Target) error { + return db._updateStepTarget(ctx, stepID, tgt, db.deleteStepTarget) +} + +func (db *DB) DeleteStepTargetTx(ctx context.Context, tx *sql.Tx, stepID string, tgt assignment.Target) error { + if tgt.TargetType() == assignment.TargetTypeSlackChannel { + var err error + tgt, err = db.lookupSlackChannel(ctx, tx, stepID, tgt.TargetID()) + if err != nil { + return err + } + } + return db._updateStepTarget(ctx, stepID, tgt, 
tx.StmtContext(ctx, db.deleteStepTarget)) +} + +func (db *DB) FindAllStepTargets(ctx context.Context, stepID string) ([]assignment.Target, error) { + return db.FindAllStepTargetsTx(ctx, nil, stepID) +} + +func (db *DB) FindAllStepTargetsTx(ctx context.Context, tx *sql.Tx, stepID string) ([]assignment.Target, error) { + err := permission.LimitCheckAny(ctx, permission.User) + if err != nil { + return nil, err + } + + err = validate.UUID("StepID", stepID) + if err != nil { + return nil, err + } + + stmt := db.findAllStepTargets + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + rows, err := stmt.QueryContext(ctx, stepID) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var tgts []assignment.Target + for rows.Next() { + var usr, sched, rot, ch, chValue sql.NullString + var chType *notificationchannel.Type + var tgt assignment.RawTarget + err = rows.Scan(&usr, &sched, &rot, &ch, &chType, &chValue, &tgt.Name) + if err != nil { + return nil, err + } + + switch { + case usr.Valid: + tgt.ID = usr.String + tgt.Type = assignment.TargetTypeUser + case sched.Valid: + tgt.ID = sched.String + tgt.Type = assignment.TargetTypeSchedule + case rot.Valid: + tgt.ID = rot.String + tgt.Type = assignment.TargetTypeRotation + case ch.Valid: + switch *chType { + case notificationchannel.TypeSlack: + tgt.ID = chValue.String + tgt.Type = assignment.TargetTypeSlackChannel + default: + tgt.ID = ch.String + tgt.Type = assignment.TargetTypeNotificationChannel + } + default: + continue + } + tgts = append(tgts, tgt) + } + + return tgts, nil +} + +func (db *DB) ActiveStep(ctx context.Context, alertID int, policyID string) (*ActiveStep, error) { + err := validate.UUID("EscalationPolicyID", policyID) + if err != nil { + return nil, err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + row := db.activeStep.QueryRowContext(ctx, alertID, policyID) + var step ActiveStep + var 
stepID sql.NullString + err = row.Scan(&stepID, &step.LastEscalation, &step.LoopCount, &step.ForceEscalation, &step.StepNumber) + if err == sql.ErrNoRows { + return nil, nil + } + step.StepID = stepID.String + step.PolicyID = policyID + step.AlertID = alertID + return &step, nil +} + +func (db *DB) CreatePolicy(ctx context.Context, p *Policy) (*Policy, error) { + return db.CreatePolicyTx(ctx, nil, p) +} + +func (db *DB) CreatePolicyTx(ctx context.Context, tx *sql.Tx, p *Policy) (*Policy, error) { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return nil, err + } + + n, err := p.Normalize() + if err != nil { + return nil, err + } + + stmt := db.createPolicy + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + n.ID = uuid.NewV4().String() + + _, err = stmt.ExecContext(ctx, n.ID, n.Name, n.Description, n.Repeat) + if err != nil { + return nil, err + } + return n, nil +} + +func (db *DB) UpdatePolicy(ctx context.Context, p *Policy) error { + return db.UpdatePolicyTx(ctx, nil, p) +} + +func (db *DB) UpdatePolicyTx(ctx context.Context, tx *sql.Tx, p *Policy) error { + err := validate.UUID("EscalationPolicyID", p.ID) + if err != nil { + return err + } + n, err := p.Normalize() + if err != nil { + return err + } + + err = permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + stmt := db.updatePolicy + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + _, err = stmt.ExecContext(ctx, n.ID, n.Name, n.Description, n.Repeat) + if err != nil { + return err + } + + db.logChange(ctx, nil, p.ID) + + return nil +} + +func (db *DB) DeletePolicy(ctx context.Context, id string) error { + return db.DeletePolicyTx(ctx, nil, id) +} + +func (db *DB) DeletePolicyTx(ctx context.Context, tx *sql.Tx, id string) error { + return db.DeleteManyPoliciesTx(ctx, tx, []string{id}) +} + +func (db *DB) DeleteManyPoliciesTx(ctx context.Context, tx *sql.Tx, ids []string) error { + err := 
permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + err = validate.ManyUUID("EscalationPolicyID", ids, 50) + if err != nil { + return err + } + + s := db.deletePolicy + if tx != nil { + s = tx.StmtContext(ctx, s) + } + _, err = s.ExecContext(ctx, pq.StringArray(ids)) + return err +} + +func (db *DB) FindOnePolicy(ctx context.Context, id string) (*Policy, error) { + return db.FindOnePolicyTx(ctx, nil, id) +} + +func (db *DB) FindOnePolicyTx(ctx context.Context, tx *sql.Tx, id string) (*Policy, error) { + err := validate.UUID("EscalationPolicyID", id) + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + stmt := db.findOnePolicy + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + row := stmt.QueryRowContext(ctx, id) + var p Policy + err = row.Scan(&p.ID, &p.Name, &p.Description, &p.Repeat) + return &p, err +} + +func (db *DB) FindOnePolicyForUpdateTx(ctx context.Context, tx *sql.Tx, id string) (*Policy, error) { + err := validate.UUID("EscalationPolicyID", id) + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + stmt := db.findOnePolicyForUpdate + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + row := stmt.QueryRowContext(ctx, id) + var p Policy + err = row.Scan(&p.ID, &p.Name, &p.Description, &p.Repeat) + return &p, err +} + +func (db *DB) FindAllPolicies(ctx context.Context) ([]Policy, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + rows, err := db.findAllPolicies.QueryContext(ctx) + if err != nil { + return nil, err + } + defer rows.Close() + + var p Policy + policies := []Policy{} + for rows.Next() { + err = rows.Scan(&p.ID, &p.Name, &p.Description, &p.Repeat) + if err != nil { + return nil, err + } + policies = append(policies, p) + } + + return policies, nil + +} + 
+func (db *DB) FindAllPoliciesBySchedule(ctx context.Context, scheduleID string) ([]Policy, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + err = validate.UUID("ScheduleID", scheduleID) + if err != nil { + return nil, err + } + rows, err := db.findAllPoliciesBySchedule.QueryContext(ctx, scheduleID) + if err != nil { + return nil, err + } + defer rows.Close() + + var p Policy + var policies []Policy + for rows.Next() { + err = rows.Scan(&p.ID, &p.Name, &p.Description, &p.Repeat) + if err != nil { + return nil, err + } + policies = append(policies, p) + } + + return policies, nil +} + +func (db *DB) FindOneStep(ctx context.Context, id string) (*Step, error) { + return db.FindOneStepTx(ctx, nil, id) +} + +func (db *DB) FindOneStepTx(ctx context.Context, tx *sql.Tx, id string) (*Step, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + err = validate.UUID("EscalationPolicyStepID ", id) + if err != nil { + return nil, err + } + + stmt := db.findOneStep + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + row := stmt.QueryRowContext(ctx, id) + var s Step + err = row.Scan(&s.ID, &s.PolicyID, &s.DelayMinutes, &s.StepNumber) + if err != nil { + return nil, err + } + + return &s, nil +} + +func (db *DB) FindOneStepForUpdateTx(ctx context.Context, tx *sql.Tx, id string) (*Step, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + err = validate.UUID("EscalationPolicyStepID ", id) + if err != nil { + return nil, err + } + + stmt := db.findOneStepForUpdate + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + row := stmt.QueryRowContext(ctx, id) + var s Step + err = row.Scan(&s.ID, &s.PolicyID, &s.DelayMinutes, &s.StepNumber) + if err != nil { + return nil, err + } + + return &s, nil +} + +func (db *DB) FindAllSteps(ctx context.Context, policyID string) ([]Step, error) { + return 
db.FindAllStepsTx(ctx, nil, policyID) +} +func (db *DB) FindAllOnCallStepsForUserTx(ctx context.Context, tx *sql.Tx, userID string) ([]Step, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + err = validate.UUID("UserID", userID) + if err != nil { + return nil, err + } + + stmt := db.findAllOnCallSteps + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + rows, err := stmt.QueryContext(ctx, userID) + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Step + for rows.Next() { + var s Step + err = rows.Scan(&s.ID, &s.PolicyID, &s.DelayMinutes, &s.StepNumber) + if err != nil { + return nil, err + } + result = append(result, s) + } + return result, nil +} +func (db *DB) FindAllStepsTx(ctx context.Context, tx *sql.Tx, policyID string) ([]Step, error) { + err := validate.UUID("EscalationPolicyID", policyID) + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + stmt := db.findAllSteps + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + rows, err := stmt.QueryContext(ctx, policyID) + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Step + for rows.Next() { + var s Step + err = rows.Scan(&s.ID, &s.PolicyID, &s.DelayMinutes, &s.StepNumber) + if err != nil { + return nil, err + } + result = append(result, s) + } + return result, nil +} + +func (db *DB) CreateStep(ctx context.Context, s *Step) (*Step, error) { + return db.CreateStepTx(ctx, nil, s) +} + +func (db *DB) CreateStepTx(ctx context.Context, tx *sql.Tx, s *Step) (*Step, error) { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return nil, err + } + + n, err := s.Normalize() + if err != nil { + return nil, err + } + + stmt := db.createStep + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + n.ID = uuid.NewV4().String() + + err = stmt.QueryRowContext(ctx, n.ID, 
n.PolicyID, n.DelayMinutes).Scan(&n.StepNumber) + if err != nil { + return nil, err + } + + db.logChange(ctx, tx, s.PolicyID) + + return n, nil +} + +func (db *DB) UpdateStepNumberTx(ctx context.Context, tx *sql.Tx, stepID string, stepNumber int) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + err = validate.UUID("EscalationPolicyStepID", stepID) + if err != nil { + return err + } + + numStmt := db.updateStepNumber + if tx != nil { + numStmt = tx.StmtContext(ctx, numStmt) + } + + _, err = numStmt.ExecContext(ctx, stepID, stepNumber) + if err != nil { + return err + } + + return nil +} + +func (db *DB) UpdateStep(ctx context.Context, s *Step) error { + return db.UpdateStepDelayTx(ctx, nil, s.ID, s.DelayMinutes) +} + +func (db *DB) UpdateStepDelayTx(ctx context.Context, tx *sql.Tx, stepID string, stepDelay int) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + err = validate.UUID("EscalationPolicyStepID", stepID) + if err != nil { + return err + } + + err = validate.Range("DelayMinutes", stepDelay, 1, 9000) + if err != nil { + return err + } + + stmt := db.updateStepDelay + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + _, err = stmt.ExecContext(ctx, stepID, stepDelay) + if err != nil { + return err + } + + return nil +} + +func (db *DB) DeleteStep(ctx context.Context, id string) (string, error) { + return db.DeleteStepTx(ctx, nil, id) +} + +func (db *DB) DeleteStepTx(ctx context.Context, tx *sql.Tx, id string) (string, error) { + err := validate.UUID("EscalationPolicyStepID", id) + if err != nil { + return "", err + } + + err = permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return "", err + } + s := db.deleteStep + if tx != nil { + s = tx.StmtContext(ctx, s) + } + row := s.QueryRowContext(ctx, id) + var polID string + err = row.Scan(&polID) + if err != nil { + return "", err + 
} + + db.logChange(ctx, tx, polID) + + return polID, nil +} + +func (db *DB) MoveStep(ctx context.Context, id string, newPos int) error { + err := validate.Many( + validate.UUID("EscalationPolicyStepID", id), + validate.Range("NewPosition", newPos, 0, 9000), + ) + if err != nil { + return err + } + err = permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + var polID string + err = db.moveStep.QueryRowContext(ctx, id, newPos).Scan(&polID) + if err != nil { + return err + } + db.logChange(ctx, nil, polID) + + return nil +} diff --git a/genericapi/config.go b/genericapi/config.go new file mode 100644 index 0000000000..61a989c583 --- /dev/null +++ b/genericapi/config.go @@ -0,0 +1,16 @@ +package genericapi + +import ( + "github.com/target/goalert/alert" + "github.com/target/goalert/heartbeat" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/user" +) + +// Config contains the values needed to implement the generic API handler. +type Config struct { + AlertStore alert.Store + IntegrationKeyStore integrationkey.Store + HeartbeatStore heartbeat.Store + UserStore user.Store +} diff --git a/genericapi/handler.go b/genericapi/handler.go new file mode 100644 index 0000000000..c777e6ac10 --- /dev/null +++ b/genericapi/handler.go @@ -0,0 +1,114 @@ +package genericapi + +import ( + "database/sql" + "net/http" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/target/goalert/alert" + "github.com/target/goalert/auth" + "github.com/target/goalert/permission" + "github.com/target/goalert/retry" + "github.com/target/goalert/util/errutil" + "github.com/target/goalert/validation/validate" +) + +// Handler responds to generic API requests +type Handler struct { + c Config +} + +// NewHandler creates a new Handler, registering generic API routes using chi. 
+func NewHandler(c Config) *Handler { + return &Handler{c: c} +} + +// ServeUserAvatar will serve a redirect for a users avatar image. +func (h *Handler) ServeUserAvatar(w http.ResponseWriter, req *http.Request) { + parts := strings.Split(req.URL.Path, "/") + userID := parts[len(parts)-1] + + ctx := req.Context() + u, err := h.c.UserStore.FindOne(ctx, userID) + if errors.Cause(err) == sql.ErrNoRows { + http.NotFound(w, req) + return + } + if errutil.HTTPError(ctx, w, err) { + return + } + + fullSize := req.FormValue("size") == "large" + http.Redirect(w, req, u.ResolveAvatarURL(fullSize), http.StatusFound) +} + +// ServeHeartbeatCheck serves the heartbeat check-in endpoint. +func (h *Handler) ServeHeartbeatCheck(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + parts := strings.Split(r.URL.Path, "/") + monitorID := parts[len(parts)-1] + + err := retry.DoTemporaryError(func(_ int) error { + return h.c.HeartbeatStore.Heartbeat(ctx, monitorID) + }, + retry.Log(ctx), + retry.Limit(12), + retry.FibBackoff(time.Second), + ) + if errors.Cause(err) == sql.ErrNoRows { + auth.Delay(ctx) + http.NotFound(w, r) + return + } + if errutil.HTTPError(ctx, w, err) { + return + } +} + +// ServeCreateAlert allows creating or closing an alert. 
+func (h *Handler) ServeCreateAlert(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + err := permission.LimitCheckAny(ctx, permission.Service) + if errutil.HTTPError(ctx, w, err) { + return + } + serviceID := permission.ServiceID(ctx) + + summary := r.FormValue("summary") + details := r.FormValue("details") + + summary = validate.SanitizeText(summary, alert.MaxSummaryLength) + details = validate.SanitizeText(details, alert.MaxDetailsLength) + + status := alert.StatusTriggered + if r.FormValue("action") == "close" { + status = alert.StatusClosed + } + + a := &alert.Alert{ + Summary: summary, + Details: details, + Source: alert.SourceGeneric, + ServiceID: serviceID, + Dedup: alert.NewUserDedup(r.FormValue("dedup")), + Status: status, + } + + err = retry.DoTemporaryError(func(int) error { + _, err = h.c.AlertStore.CreateOrUpdate(ctx, a) + return err + }, + retry.Log(ctx), + retry.Limit(10), + retry.FibBackoff(time.Second), + ) + if errutil.HTTPError(ctx, w, errors.Wrap(err, "create alert")) { + return + } + + w.WriteHeader(204) +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000000..0b21f72f66 --- /dev/null +++ b/go.mod @@ -0,0 +1,80 @@ +module github.com/target/goalert + +go 1.12 + +require ( + cloud.google.com/go v0.37.4 + contrib.go.opencensus.io/exporter/stackdriver v0.5.0 + github.com/99designs/gqlgen v0.8.3 + github.com/VividCortex/ewma v1.1.1 // indirect + github.com/abiosoft/ishell v2.0.0+incompatible + github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db // indirect + github.com/agnivade/levenshtein v1.0.2 // indirect + github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38 // indirect + github.com/alecthomas/chroma v0.4.0 + github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721 // indirect + github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1 // indirect + github.com/alexeyco/simpletable v0.0.0-20180729223640-1fa9009f1080 + github.com/brianvoe/gofakeit 
v3.11.1+incompatible + github.com/chzyer/logex v1.1.10 // indirect + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect + github.com/cockroachdb/apd v1.1.0 // indirect + github.com/coreos/go-oidc v0.0.0-20180604232625-8ae1da518bd4 + github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 // indirect + github.com/davecgh/go-spew v1.1.1 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/dlclark/regexp2 v1.1.6 // indirect + github.com/fatih/color v1.7.0 + github.com/felixge/httpsnoop v1.0.0 + github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect + github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef + github.com/golang/mock v1.2.0 + github.com/golang/protobuf v1.3.1 // indirect + github.com/google/go-github v15.0.0+incompatible + github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 // indirect + github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc + github.com/gorilla/websocket v1.4.0 // indirect + github.com/graphql-go/graphql v0.7.5 + github.com/hashicorp/golang-lru v0.5.1 // indirect + github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect + github.com/jackc/pgx v0.0.0-20180811142536-aa561b8f3c54 + github.com/joho/godotenv v1.2.0 + github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect + github.com/lib/pq v0.0.0-20190326042056-d6156e141ac6 + github.com/magiconair/properties v1.8.0 // indirect + github.com/mattn/go-colorable v0.0.9 + github.com/mattn/go-isatty v0.0.3 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/pelletier/go-toml v1.2.0 + github.com/pkg/errors v0.8.1 + github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect + 
github.com/rubenv/sql-migrate v0.0.0-20180618073704-aff46b65bb7f + github.com/satori/go.uuid v1.2.0 + github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect + github.com/sirupsen/logrus v1.4.1 + github.com/spf13/afero v1.1.1 // indirect + github.com/spf13/cast v1.2.0 // indirect + github.com/spf13/cobra v0.0.3 + github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec // indirect + github.com/spf13/pflag v1.0.1 // indirect + github.com/spf13/viper v1.0.2 + github.com/stretchr/testify v1.3.0 + github.com/ttacon/builder v0.0.0-20170518171403-c099f663e1c2 // indirect + github.com/ttacon/libphonenumber v1.0.1 + github.com/vbauerster/mpb v3.3.2+incompatible + github.com/vektah/gqlparser v1.1.2 + go.opencensus.io v0.20.2 + golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 + golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6 + golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a + golang.org/x/sys v0.0.0-20190429094411-2cc0cad0ac78 // indirect + golang.org/x/text v0.3.2 // indirect + golang.org/x/tools v0.0.0-20190428024724-550556f78a90 + google.golang.org/api v0.3.2 // indirect + google.golang.org/appengine v1.5.0 // indirect + google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7 + google.golang.org/grpc v1.20.1 // indirect + gopkg.in/square/go-jose.v2 v2.1.6 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000000..8fe45a9e37 --- /dev/null +++ b/go.sum @@ -0,0 +1,321 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU= +cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +contrib.go.opencensus.io/exporter/stackdriver v0.5.0 h1:QpgHy9ctBU5KGQA816RlOnnEIRoJ29VCPpAl3vzWc4A= +contrib.go.opencensus.io/exporter/stackdriver 
v0.5.0/go.mod h1:QeFzMJDAw8TXt5+aRaSuE8l5BwaMIOIlaVkBOPRuMuw= +github.com/99designs/gqlgen v0.8.3 h1:I6bMglXNKkn4KlvkSMzqZw53e1N2FF9Gud4NmsOxqiA= +github.com/99designs/gqlgen v0.8.3/go.mod h1:aLyJw9xUgdJxZ8EqNQxo2pGFhXXJ/hq8t7J4yn8TgI4= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= +github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/abiosoft/ishell v2.0.0+incompatible h1:zpwIuEHc37EzrsIYah3cpevrIc8Oma7oZPxr03tlmmw= +github.com/abiosoft/ishell v2.0.0+incompatible/go.mod h1:HQR9AqF2R3P4XXpMpI0NAzgHf/aS6+zVXRj14cVk9qg= +github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db h1:CjPUSXOiYptLbTdr1RceuZgSFDQ7U15ITERUGrUORx8= +github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db/go.mod h1:rB3B4rKii8V21ydCbIzH5hZiCQE7f5E9SzUb/ZZx530= +github.com/agnivade/levenshtein v1.0.1 h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/agnivade/levenshtein v1.0.2 h1:xKF7WlEzoa+ZVkzBxy0ukdzI2etYiWGlTPMNTBGncKI= +github.com/agnivade/levenshtein v1.0.2/go.mod h1:JLvzGblJATanj48SD0YhHTEFGkWvw3ASLFWSiMIFXsE= +github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38 h1:smF2tmSOzy2Mm+0dGI2AIUHY+w0BUc+4tn40djz7+6U= +github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38/go.mod h1:r7bzyVFMNntcxPZXK3/+KdruV1H5KSlyVY0gc+NgInI= +github.com/alecthomas/chroma v0.4.0 h1:asyiVqfr7T9308T8sgZnopIXhb0SiZVIXNp89V+b46I= +github.com/alecthomas/chroma v0.4.0/go.mod 
h1:MmozekIi2rfQSzDcdEZ2BoJ9Pxs/7uc2Y4Boh+hIeZo= +github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721 h1:JHZL0hZKJ1VENNfmXvHbgYlbUOvpzYzvy2aZU5gXVeo= +github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721/go.mod h1:QO9JBoKquHd+jz9nshCh40fOfO+JzsoXy8qTHF68zU0= +github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1 h1:GDQdwm/gAcJcLAKQQZGOJ4knlw+7rfEQQcmwTbt4p5E= +github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexeyco/simpletable v0.0.0-20180729223640-1fa9009f1080 h1:LxG2QAVrS0Ml5A5/YUG5BLOJOrx2OR9yT9vkKW3CmUQ= +github.com/alexeyco/simpletable v0.0.0-20180729223640-1fa9009f1080/go.mod h1:gx4+gp4N5VWqThMIidoUMBNUCT4Pan3J8ETR1ParWUU= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/brianvoe/gofakeit v3.11.1+incompatible h1:JPPWjNa+KCDjSWAND25EPVUPSej4XHoyba8E6Z9jjJ4= +github.com/brianvoe/gofakeit v3.11.1+incompatible/go.mod h1:kfwdRA90vvNhPutZWfH7WPaDzUjz+CZFqG+rPkOjGOc= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/chzyer/logex v1.1.10/go.mod 
h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/coreos/go-oidc v0.0.0-20180604232625-8ae1da518bd4 h1:jyma0abg+TRm1wgfS+n3+l9taXto1OUYdLKvlPw9ZEs= +github.com/coreos/go-oidc v0.0.0-20180604232625-8ae1da518bd4/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 h1:y5HC9v93H5EPKqaS1UYVg1uYah5Xf51mBfIoWehClUQ= +github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964/go.mod h1:Xd9hchkHSWYkEqJwUGisez3G1QY8Ryz0sdWrLPMGjLk= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dlclark/regexp2 v1.1.6 h1:CqB4MjHw0MFCDj+PHHjiESmHX+N7t0tJzKvC6M97BRg= +github.com/dlclark/regexp2 v1.1.6/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= 
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.0 h1:gh8fMGz0rlOv/1WmRZm7OgncIOTsAj21iNJot48omJQ= +github.com/felixge/httpsnoop v1.0.0/go.mod h1:3+D9sFq0ahK/JeJPhCBUV1xlf4/eIYrUQaxulT0VzX8= +github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BMXYYRWTLOJKlh+lOBt6nUQgXAfB7oVIQt5cNreqSLI= +github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-chi/chi v3.3.2+incompatible h1:uQNcQN3NsV1j4ANsPh42P4ew4t6rnRbJb8frvpp31qQ= +github.com/go-chi/chi v3.3.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= +github.com/golang/groupcache 
v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-github v15.0.0+incompatible h1:jlPg2Cpsxb/FyEV/MFiIE9tW/2RAevQNZDPeHbf5a94= +github.com/google/go-github v15.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 h1:zLTLjkaOFEFIOxY5BWLFLwh+cL8vOBW4XJ2aqLE/Tf0= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= 
+github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc h1:cJlkeAx1QYgO5N80aF5xRGstVsRQwgLR7uA2FnP1ZjY= +github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/graphql-go/graphql v0.7.5 h1:/JYC+NCUsSAfP/bVn1/ij8zvc7kzLwXUMyctSXdsE6o= +github.com/graphql-go/graphql v0.7.5/go.mod h1:k6yrAYQaSP59DC5UVxbgxESlmVyojThKdORUqGDGmrI= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce h1:xdsDDbiBDQTKASoGEZ+pEmF1OnWuu8AQ9I8iNbHNeno= +github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgx v0.0.0-20180811142536-aa561b8f3c54 h1:GiSrWbFxs4n0Fgq5TL/GNSzXSadQv/14BsFTnVWEc34= +github.com/jackc/pgx v0.0.0-20180811142536-aa561b8f3c54/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/joho/godotenv v1.2.0 h1:vGTvz69FzUFp+X4/bAkb0j5BoLC+9bpqTWY8mjhA9pc= +github.com/joho/godotenv v1.2.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq 
v0.0.0-20190326042056-d6156e141ac6 h1:faSzJmSgOhbgs/gWoEPhVr+mHTZWGFwiBgCW6/P49VM= +github.com/lib/pq v0.0.0-20190326042056-d6156e141ac6/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= 
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU= +github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rubenv/sql-migrate v0.0.0-20180618073704-aff46b65bb7f h1:Tx9nvIZDqpSZG6pp4zb3OWKGK3y5Af7L1LxmD0BI5Hw= +github.com/rubenv/sql-migrate v0.0.0-20180618073704-aff46b65bb7f/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/vfsgen v0.0.0-20180121065927-ffb13db8def0/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/spf13/afero v1.1.1 
h1:Lt3ihYMlE+lreX1GS4Qw4ZsNpYQLxIXKBTEOXm3nt6I= +github.com/spf13/afero v1.1.1/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.2.0 h1:HHl1DSRbEQN2i8tJmtS6ViPyHx35+p51amrdsiTCrkg= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec h1:2ZXvIUGghLpdTVHR1UfvfrzoVlZaE/yOWC5LueIHZig= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.0.2 h1:Ncr3ZIuJn322w2k1qmzXDnkLAdQMlJqBa9kfAH+irso= +github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/ttacon/builder v0.0.0-20170518171403-c099f663e1c2 h1:5u+EJUQiosu3JFX0XS0qTf5FznsMOzTjGqavBGuCbo0= +github.com/ttacon/builder v0.0.0-20170518171403-c099f663e1c2/go.mod h1:4kyMkleCiLkgY6z8gK5BkI01ChBtxR0ro3I1ZDcGM3w= +github.com/ttacon/libphonenumber v1.0.1 h1:sYxYtW16xbklwUA3tJjTGMInEMLYClJjiIX4b7t5Ip0= +github.com/ttacon/libphonenumber v1.0.1/go.mod 
h1:E0TpmdVMq5dyVlQ7oenAkhsLu86OkUl+yR4OAxyEg/M= +github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/vbauerster/mpb v3.3.2+incompatible h1:IAXNkJBpRdoXCjjReAELWPon+JDp+7wpDUKKh6MyJdQ= +github.com/vbauerster/mpb v3.3.2+incompatible/go.mod h1:zAHG26FUhVKETRu+MWqYXcI70POlC6N8up9p1dID7SU= +github.com/vektah/dataloaden v0.2.0/go.mod h1:vxM6NuRlgiR0M6wbVTJeKp9vQIs81ZMfCYO+4yq/jbE= +github.com/vektah/gqlparser v1.1.2 h1:ZsyLGn7/7jDNI+y4SEhI4yAxRChlv15pUHMjijT+e68= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180404174746-b3c676e531a6/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6 h1:FP8hkuE6yUEaJnK7O2eTuejKWwW+Rhfj80dQ2JcKxCU= +golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190429094411-2cc0cad0ac78 h1:ddnrbGOgFiC0zV+uaYoSkl0f47vnII6Zu426zWQrWkg= +golang.org/x/sys v0.0.0-20190429094411-2cc0cad0ac78/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190428024724-550556f78a90 h1:oGQGZoUHCvTWJLWXu4Qpp6uMF6gpOAGVm1hZx1KJkhU= +golang.org/x/tools v0.0.0-20190428024724-550556f78a90/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.3.2 h1:iTp+3yyl/KOtxa/d1/JUE0GGSoR6FuW5udver22iwpw= +google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7 h1:ZUjXAXmrAyrmmCPHgCA/vChHcpsX27MZ3yBonD/z1KE= 
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/square/go-jose.v2 v2.1.6 h1:oB3Nsrhs3CNwP1t2WZ/eGtjH8BQhmcGx3zD8Lla+NjA= +gopkg.in/square/go-jose.v2 v2.1.6/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a h1:/8zB6iBfHCl1qAnEAWwGPNrUvapuy6CPla1VM0k8hQw= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
+sourcegraph.com/sourcegraph/appdash v0.0.0-20180110180208-2cc67fd64755/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k= diff --git a/grafana/grafana.go b/grafana/grafana.go new file mode 100644 index 0000000000..5f691558fa --- /dev/null +++ b/grafana/grafana.go @@ -0,0 +1,104 @@ +package grafana + +import ( + "encoding/json" + "net/http" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/target/goalert/alert" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/permission" + "github.com/target/goalert/retry" + "github.com/target/goalert/util/errutil" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation/validate" +) + +type grafanaPost struct { + RuleName string + RuleID int + Message string + State string + Title string + RuleURL string +} + +func clientError(w http.ResponseWriter, code int, err error) bool { + if err == nil { + return false + } + + http.Error(w, http.StatusText(code), code) + return true +} + +func GrafanaToEventsAPI(aDB alert.Store, intDB integrationkey.Store) http.HandlerFunc { + + return func(w http.ResponseWriter, r *http.Request) { + + ctx := r.Context() + + err := permission.LimitCheckAny(ctx, permission.Service) + if errutil.HTTPError(ctx, w, err) { + return + } + serviceID := permission.ServiceID(ctx) + + var g grafanaPost + err = json.NewDecoder(r.Body).Decode(&g) + if clientError(w, http.StatusBadRequest, err) { + log.Logf(ctx, "bad request from grafana: %v", err) + return + } + + ctx = log.WithFields(ctx, log.Fields{ + "RuleURL": g.RuleURL, + "State": g.State, + }) + + var grafanaState alert.Status + switch g.State { + case "alerting": + grafanaState = alert.StatusTriggered + case "ok": + grafanaState = alert.StatusClosed + case "no_data": + // no data.. 
+ return + default: + log.Logf(ctx, "bad request from grafana: missing or invalid state") + http.Error(w, "invalid state", http.StatusBadRequest) + return + } + + var urlStr string + if validate.AbsoluteURL("RuleURL", g.RuleURL) == nil { + urlStr = g.RuleURL + } + body := strings.TrimSpace(urlStr + "\n\n" + g.Message) + + //dedupe is description, source, and serviceID + msg := &alert.Alert{ + Summary: validate.SanitizeText(g.RuleName, alert.MaxSummaryLength), + Details: validate.SanitizeText(body, alert.MaxDetailsLength), + Status: grafanaState, + Source: alert.SourceGrafana, + ServiceID: serviceID, + Dedup: alert.NewUserDedup(r.FormValue("dedup")), + } + + err = retry.DoTemporaryError(func(int) error { + _, err = aDB.CreateOrUpdate(ctx, msg) + return err + }, + retry.Log(ctx), + retry.Limit(10), + retry.FibBackoff(time.Second), + ) + if errutil.HTTPError(ctx, w, errors.Wrap(err, "create or update alert for grafana")) { + return + } + } +} diff --git a/graphql/alert.go b/graphql/alert.go new file mode 100644 index 0000000000..11c5aa16ec --- /dev/null +++ b/graphql/alert.go @@ -0,0 +1,785 @@ +package graphql + +import ( + "fmt" + "github.com/target/goalert/alert" + alertlog "github.com/target/goalert/alert/log" + "github.com/target/goalert/assignment" + "github.com/target/goalert/escalation" + "github.com/target/goalert/permission" + "github.com/target/goalert/schedule" + "github.com/target/goalert/user" + "github.com/target/goalert/validation" + "sort" + "strings" + "time" + + g "github.com/graphql-go/graphql" + "github.com/pkg/errors" +) + +type escalationPolicySnapshot struct { + Repeat int `json:"repeat"` + CurrentLevel int `json:"current_level"` + LastEscalation time.Time `json:"last_escalation"` + Steps []escalationPolicySnapshotStep `json:"steps"` +} + +type escalationPolicySnapshotStep struct { + DelayMinutes int `json:"delay_minutes"` + UserIDs []string `json:"user_ids"` + ScheduleIDs []string `json:"schedule_ids"` +} + +type 
alertLogSearchResult struct { + ID int `json:"id"` + AlertID int `json:"alert_id"` + Log string `json:"message"` + Timestamp time.Time `json:"timestamp"` + Subject *alertlog.Subject `json:"subject"` + Event alertlog.Type `json:"event"` +} + +func (h *Handler) alertLogSubjectFields() g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + "name": &g.Field{Type: g.String}, + "type": &g.Field{Type: g.String}, + "classifier": &g.Field{Type: g.String}, + } +} + +var alertLogEventType = g.NewEnum(g.EnumConfig{ + Name: "AlertLogEventType", + Values: g.EnumValueConfigMap{ + "created": &g.EnumValueConfig{Value: alertlog.TypeCreated}, + "closed": &g.EnumValueConfig{Value: alertlog.TypeClosed}, + "escalated": &g.EnumValueConfig{Value: alertlog.TypeEscalated}, + "acknowledged": &g.EnumValueConfig{Value: alertlog.TypeAcknowledged}, + "escalation_request": &g.EnumValueConfig{Value: alertlog.TypeEscalationRequest}, + "notification_sent": &g.EnumValueConfig{Value: alertlog.TypeNotificationSent}, + "policy_updated": &g.EnumValueConfig{Value: alertlog.TypePolicyUpdated}, + "duplicate_suppressed": &g.EnumValueConfig{Value: alertlog.TypeDuplicateSupressed}, + }, +}) + +func getEPSStep(src interface{}) (*escalationPolicySnapshotStep, error) { + switch s := src.(type) { + case *escalationPolicySnapshotStep: + return s, nil + case escalationPolicySnapshotStep: + return &s, nil + default: + return nil, errors.Errorf("could not id of EPS Step (unknown source type %T)", s) + } +} +func getAlertSummary(p interface{}) (*alert.Summary, error) { + switch s := p.(type) { + case alert.Summary: + return &s, nil + case *alert.Summary: + return s, nil + default: + return nil, errors.Errorf("could not get Summary (unknown source type %T)", s) + } +} + +func (h *Handler) resolveUsersFromIDs(p g.ResolveParams) (interface{}, error) { + src, err := getEPSStep(p.Source) + if err != nil { + return nil, err + } + + users := make([]user.User, 0, len(src.UserIDs)) + var u *user.User + for _, id := 
range src.UserIDs { + u, err = h.c.UserStore.FindOne(p.Context, id) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + users = append(users, *u) + } + return users, nil +} + +func (h *Handler) resolveSchedulesFromIDs(p g.ResolveParams) (interface{}, error) { + src, err := getEPSStep(p.Source) + if err != nil { + return nil, err + } + + schedules := make([]schedule.Schedule, 0, len(src.ScheduleIDs)) + var s *schedule.Schedule + for _, id := range src.ScheduleIDs { + s, err = h.c.ScheduleStore.FindOne(p.Context, id) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + schedules = append(schedules, *s) + } + return schedules, nil +} + +func (h *Handler) alertSummaryFields() g.Fields { + return g.Fields{ + "service_id": &g.Field{Type: g.String}, + "service_name": &g.Field{Type: g.String}, + "service": &g.Field{ + Type: h.service, + Resolve: func(p g.ResolveParams) (interface{}, error) { + a, err := getAlertSummary(p.Source) + if err != nil { + return nil, err + } + + userID := permission.UserID(p.Context) + return newScrubber(p.Context).scrub(h.c.ServiceStore.FindOneForUser(p.Context, userID, a.ServiceID)) + }, + }, + + "totals": &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "AlertTotals", + Fields: g.Fields{ + "unacknowledged": &g.Field{Type: g.Int}, + "acknowledged": &g.Field{Type: g.Int}, + "closed": &g.Field{Type: g.Int}, + }, + }), + }, + } +} + +func (h *Handler) alertFields() g.Fields { + return g.Fields{ + "_id": &g.Field{Type: g.Int}, + "id": &g.Field{ + Type: g.String, + Description: "Provides a unique string-version of id for use with Relay in the form of Alert(id).", + Resolve: func(p g.ResolveParams) (interface{}, error) { + a, err := getAlert(p.Source) + if err != nil { + return nil, err + } + + return fmt.Sprintf("Alert(%d)", a.ID), nil + }, + }, + "assignments": &g.Field{ + Type: g.NewList(h.user), + }, + + "status": &g.Field{Type: alertStatus, DeprecationReason: "Use the 'status_2' field instead."}, + 
"status_2": &g.Field{ + Type: alertStatus, + Resolve: func(p g.ResolveParams) (interface{}, error) { + a, err := getAlert(p.Source) + if err != nil { + return "", err + } + return a.Status, nil + }, + }, + "description": &g.Field{ + Type: g.String, + DeprecationReason: "Use the 'summary' and 'details' fields instead.", + Resolve: func(p g.ResolveParams) (interface{}, error) { + a, err := getAlert(p.Source) + if err != nil { + return nil, err + } + + return a.Description(), nil + }, + }, + "source": &g.Field{Type: g.String}, + "service_id": &g.Field{Type: g.String}, + "service_name": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + a, err := getAlert(p.Source) + if err != nil { + return nil, err + } + s, err := h.c.ServiceStore.FindOneForUser(p.Context, permission.UserID(p.Context), a.ServiceID) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + return s.Name, nil + }, + }, + "service": &g.Field{ + Type: h.service, + Resolve: func(p g.ResolveParams) (interface{}, error) { + a, err := getAlert(p.Source) + if err != nil { + return nil, err + } + + userID := permission.UserID(p.Context) + return newScrubber(p.Context).scrub(h.c.ServiceStore.FindOneForUser(p.Context, userID, a.ServiceID)) + }, + }, + + "summary": &g.Field{Type: g.String}, + "details": &g.Field{Type: g.String}, + + "escalation_level": &g.Field{Type: g.Int, Description: "The total number of escalations for this alert.", + Resolve: func(p g.ResolveParams) (interface{}, error) { + a, err := getAlert(p.Source) + if err != nil { + return nil, err + } + l, err := h.c.AlertStore.State(p.Context, []int{a.ID}) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + if len(l) == 0 { + return -1, nil + } + return l[0].StepNumber, nil + }, + }, + + "logs": &g.Field{ + Type: g.NewList(h.alertLog), + Resolve: func(p g.ResolveParams) (interface{}, error) { + a, err := getAlert(p.Source) + if err != nil { + return nil, err + } + var opts 
alertlog.SearchOptions + opts.AlertID = a.ID + opts.Limit = 50 + entries, _, err := h.c.AlertLogStore.Search(p.Context, &opts) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + custom := make([]alertLogSearchResult, len(entries)) + for i, e := range entries { + custom[i].AlertID = e.AlertID() + custom[i].Log = e.String() + custom[i].ID = e.ID() + custom[i].Timestamp = e.Timestamp() + custom[i].Subject = e.Subject() + } + return custom, nil + }, + DeprecationReason: "Use the 'logs_2' field instead.", + }, + "logs_2": &g.Field{ + Type: g.NewList(h.alertLog), + Resolve: func(p g.ResolveParams) (interface{}, error) { + a, err := getAlert(p.Source) + if err != nil { + return nil, err + } + + var opts alertlog.SearchOptions + opts.AlertID = a.ID + entries, _, err := h.c.AlertLogStore.Search(p.Context, &opts) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + custom := make([]alertLogSearchResult, len(entries)) + for i, e := range entries { + custom[i].AlertID = e.AlertID() + custom[i].Log = e.String() + custom[i].ID = e.ID() + custom[i].Timestamp = e.Timestamp() + custom[i].Subject = e.Subject() + } + return custom, nil + }, + }, + "created_at": &g.Field{Type: ISOTimestamp}, + "escalation_policy_snapshot": &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "EscalationPolicySnapshot", + Description: "Snapshot of an escalation policy used by an Alert.", + Fields: g.Fields{ + "repeat": &g.Field{Type: g.Int}, + "current_level": &g.Field{Type: g.Int}, + "last_escalation": &g.Field{Type: ISOTimestamp}, + "steps": &g.Field{Type: g.NewList(g.NewObject(g.ObjectConfig{ + Name: "EscalationPolicySnapshotStep", + Fields: g.Fields{ + "delay_minutes": &g.Field{Type: g.Int}, + "user_ids": &g.Field{Type: g.NewList(g.String)}, + "users": &g.Field{Type: g.NewList(h.user), Resolve: h.resolveUsersFromIDs}, + "schedule_ids": &g.Field{Type: g.NewList(g.String)}, + "schedules": &g.Field{Type: g.NewList(h.schedule), Resolve: 
h.resolveSchedulesFromIDs}, + }, + }))}, + }, + }), + Resolve: func(p g.ResolveParams) (interface{}, error) { + a, err := getAlert(p.Source) + if err != nil { + return nil, err + } + + polID, err := h.c.Resolver.AlertEPID(p.Context, a.ID) + + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + + if polID == "" { + return nil, fmt.Errorf("no escalation policy found") + } + + pol, err := h.c.EscalationStore.FindOnePolicy(p.Context, polID) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + act, err := h.c.EscalationStore.ActiveStep(p.Context, a.ID, polID) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + steps, err := h.c.EscalationStore.FindAllSteps(p.Context, polID) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + sort.Slice(steps, func(i, j int) bool { return steps[i].StepNumber < steps[j].StepNumber }) + + if act == nil { + act = &escalation.ActiveStep{ + AlertID: a.ID, + LastEscalation: time.Now(), + PolicyID: polID, + } + if len(steps) > 0 { + act.StepID = steps[0].ID + } + } + + eps := &escalationPolicySnapshot{ + Repeat: pol.Repeat, + LastEscalation: act.LastEscalation, + } + + for _, step := range steps { + if step.ID == act.StepID { + eps.CurrentLevel = len(steps)*act.LoopCount + step.StepNumber + } + epss := escalationPolicySnapshotStep{ + DelayMinutes: step.DelayMinutes, + } + asn, err := h.c.EscalationStore.FindAllStepTargets(p.Context, step.ID) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + for _, a := range asn { + switch a.TargetType() { + case assignment.TargetTypeUser: + epss.UserIDs = append(epss.UserIDs, a.TargetID()) + case assignment.TargetTypeSchedule: + epss.ScheduleIDs = append(epss.ScheduleIDs, a.TargetID()) + } + } + eps.Steps = append(eps.Steps, epss) + } + + return eps, nil + }, + }, + } +} + +func getAlert(src interface{}) (*alert.Alert, error) { + switch a := src.(type) { + case alert.Alert: + return &a, nil + case *alert.Alert: + 
return a, nil + default: + return nil, fmt.Errorf("could not resolve id of alert (unknown source type %T)", a) + } +} + +func (h *Handler) updateAlertStatusByServiceField() *g.Field { + return &g.Field{ + Type: g.NewList(h.alert), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "UpdateAlertStatusByServiceInput", + Fields: g.InputObjectConfigFieldMap{ + "service_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String), Description: "The ID of the service"}, + "status": &g.InputObjectFieldConfig{Type: g.NewNonNull(alertStatus)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + serviceID, _ := m["service_id"].(string) + status, _ := m["status"].(alert.Status) + + return newScrubber(p.Context).scrub(nil, h.c.AlertStore.UpdateStatusByService(p.Context, serviceID, status)) + }, + } +} + +func (h *Handler) escalateAlertField() *g.Field { + return &g.Field{ + Type: h.alert, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "EscalateAlertInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int), Description: "The ID of the alert (_id field in GraphQL)"}, + "current_escalation_level": &g.InputObjectFieldConfig{ + Type: g.NewNonNull(g.Int), + Description: "The current escalation level of the alert.", + }, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + id, _ := m["id"].(int) + lvl, _ := m["current_escalation_level"].(int) + err := h.c.AlertStore.Escalate(p.Context, id, lvl) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + + return 
newScrubber(p.Context).scrub(h.c.AlertStore.FindOne(p.Context, id)) + }, + } +} + +var alertStatus = g.NewEnum(g.EnumConfig{ + Name: "AlertStatus", + Values: g.EnumValueConfigMap{ + "unacknowledged": &g.EnumValueConfig{Value: alert.StatusTriggered}, + "acknowledged": &g.EnumValueConfig{Value: alert.StatusActive}, + "closed": &g.EnumValueConfig{Value: alert.StatusClosed}, + }, +}) + +func getAlertStatus(m map[string]interface{}) alert.Status { + s, ok := m["status"].(alert.Status) + if ok { + return s + } + + s2, ok := m["status_2"].(alert.Status) + if ok { + return s2 + } + + return "" +} + +func (h *Handler) createAlertField() *g.Field { + return &g.Field{ + Type: h.alert, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAlertInput", + Fields: g.InputObjectConfigFieldMap{ + "status": &g.InputObjectFieldConfig{Type: alertStatus}, + "status_2": &g.InputObjectFieldConfig{Type: alertStatus}, + "description": &g.InputObjectFieldConfig{Type: g.String}, + "summary": &g.InputObjectFieldConfig{Type: g.String}, + "details": &g.InputObjectFieldConfig{Type: g.String}, + "service_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var a alert.Alert + a.Status = getAlertStatus(m) + + if string(a.Status) == "" { + a.Status = alert.StatusTriggered + } + a.Summary, _ = m["summary"].(string) + a.Details, _ = m["details"].(string) + if a.Summary == "" { + desc, _ := m["description"].(string) + parts := strings.SplitN(desc, "\n", 2) + a.Summary = parts[0] + if len(parts) == 2 { + a.Details = parts[1] + } else { + a.Details = "" + } + } + + a.Source = alert.SourceManual + a.ServiceID, _ = m["service_id"].(string) + + return 
newScrubber(p.Context).scrub(h.c.AlertStore.CreateOrUpdate(p.Context, &a)) + }, + } +} + +func (h *Handler) updateStatusAlertField() *g.Field { + return &g.Field{ + Type: h.alert, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "UpdateAlertStatusInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + "status": &g.InputObjectFieldConfig{Type: alertStatus}, + "status_2": &g.InputObjectFieldConfig{Type: alertStatus}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + id, _ := m["id"].(int) + status := getAlertStatus(m) + + if string(status) == "" { + status = alert.StatusTriggered + } + + err := h.c.AlertStore.UpdateStatus(p.Context, id, status) + if alert.IsAlreadyAcknowledged(err) || alert.IsAlreadyClosed(err) { + err = nil + } + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + return newScrubber(p.Context).scrub(h.c.AlertStore.FindOne(p.Context, id)) + }, + } +} + +func (h *Handler) alertField() *g.Field { + return &g.Field{ + Type: h.alert, + Args: g.FieldConfigArgument{ + "id": &g.ArgumentConfig{ + Type: g.NewNonNull(g.Int), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + id, ok := p.Args["id"].(int) + if !ok { + return nil, validation.NewFieldError("id", "required") + } + + return newScrubber(p.Context).scrub(h.c.AlertStore.FindOne(p.Context, id)) + }, + } +} + +func (h *Handler) alertsField() *g.Field { + return &g.Field{ + Name: "Alerts", + Type: g.NewList(h.alert), + DeprecationReason: "Use the 'alerts2' field instead.", + Resolve: func(p g.ResolveParams) (interface{}, error) { + a, _, err := h.c.AlertStore.LegacySearch(p.Context, nil) + return newScrubber(p.Context).scrub(a, err) + }, + } +} + +var alertSortByEnum = 
g.NewEnum(g.EnumConfig{ + Name: "AlertSortBy", + Values: g.EnumValueConfigMap{ + "status": &g.EnumValueConfig{Value: alert.SortByStatus}, + "id": &g.EnumValueConfig{Value: alert.SortByID}, + "created_at": &g.EnumValueConfig{Value: alert.SortByCreatedTime}, + "summary": &g.EnumValueConfig{Value: alert.SortBySummary}, + "service": &g.EnumValueConfig{Value: alert.SortByServiceName}, + }, +}) + +func (h *Handler) searchAlertsField() *g.Field { + return &g.Field{ + Name: "Alerts2", + Args: g.FieldConfigArgument{ + "options": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "AlertSearchOptions", + Fields: g.InputObjectConfigFieldMap{ + "search": &g.InputObjectFieldConfig{ + Type: g.String, + Description: "Searches for case-insensitive summary or service name substring match, or exact id match.", + }, + "service_id": &g.InputObjectFieldConfig{Type: g.String}, + "sort_by": &g.InputObjectFieldConfig{Type: alertSortByEnum}, + "sort_desc": &g.InputObjectFieldConfig{Type: g.Boolean}, + "omit_triggered": &g.InputObjectFieldConfig{Type: g.Boolean}, + "omit_active": &g.InputObjectFieldConfig{Type: g.Boolean}, + "omit_closed": &g.InputObjectFieldConfig{Type: g.Boolean}, + "limit": &g.InputObjectFieldConfig{ + Type: g.Int, + Description: "Defaulted to 50 if not supplied.", + }, + "offset": &g.InputObjectFieldConfig{Type: g.Int}, + "favorite_services_only": &g.InputObjectFieldConfig{Type: g.Boolean}, + }, + }), + }, + }, + Type: g.NewObject(g.ObjectConfig{ + Name: "AlertSearchResult", + Fields: g.Fields{ + "items": &g.Field{Type: g.NewList(h.alert)}, + "total_count": &g.Field{Type: g.Int}, + }, + }), + Resolve: func(p g.ResolveParams) (interface{}, error) { + var result struct { + Items []alert.Alert `json:"items"` + Total int `json:"total_count"` + } + + var opts alert.LegacySearchOptions + if m, ok := p.Args["options"].(map[string]interface{}); ok { + opts.Search, _ = m["search"].(string) + opts.ServiceID, _ = m["service_id"].(string) + opts.SortBy, _ = 
m["sort_by"].(alert.SortBy) + opts.SortDesc, _ = m["sort_desc"].(bool) + opts.OmitTriggered, _ = m["omit_triggered"].(bool) + opts.OmitActive, _ = m["omit_active"].(bool) + opts.OmitClosed, _ = m["omit_closed"].(bool) + opts.Limit, _ = m["limit"].(int) + opts.Offset, _ = m["offset"].(int) + if v, ok := m["favorite_services_only"].(bool); ok && v { + opts.FavoriteServicesOnlyUserID = permission.UserID(p.Context) + } + } + + a, ttl, err := h.c.AlertStore.LegacySearch(p.Context, &opts) + result.Items = a + result.Total = ttl + + return newScrubber(p.Context).scrub(result, err) + }, + } +} + +func (h *Handler) searchAlertLogsField() *g.Field { + return &g.Field{ + Name: "AlertLogs", + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "AlertLogSearchOptions", + Fields: g.InputObjectConfigFieldMap{ + "service_id": &g.InputObjectFieldConfig{Type: g.String}, + "alert_id": &g.InputObjectFieldConfig{Type: g.Int}, + "user_id": &g.InputObjectFieldConfig{Type: g.String}, + "integration_key_id": &g.InputObjectFieldConfig{Type: g.String}, + "start_time": &g.InputObjectFieldConfig{Type: g.String}, + "end_time": &g.InputObjectFieldConfig{Type: g.String}, + "event_type": &g.InputObjectFieldConfig{Type: alertLogEventType}, + "sort_by": &g.InputObjectFieldConfig{Type: alertLogsSortByEnum}, + "sort_desc": &g.InputObjectFieldConfig{Type: g.Boolean}, + "limit": &g.InputObjectFieldConfig{ + Type: g.Int, + Description: "Defaulted to 25 if not supplied, Maximum is 50.", + }, + "offset": &g.InputObjectFieldConfig{Type: g.Int}, + }, + }), + }, + }, + + Type: g.NewObject(g.ObjectConfig{ + Name: "AlertLogSearchResult", + Fields: g.Fields{ + "items": &g.Field{Type: g.NewList(h.alertLog)}, + "total_count": &g.Field{Type: g.Int}, + }, + }), + + Resolve: func(p g.ResolveParams) (interface{}, error) { + var list struct { + Items []alertLogSearchResult `json:"items"` + Total int `json:"total_count"` + } + + var opts alertlog.SearchOptions 
+ m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + opts.AlertID, _ = m["alert_id"].(int) + opts.ServiceID, _ = m["service_id"].(string) + opts.IntegrationKeyID, _ = m["integration_key_id"].(string) + opts.UserID, _ = m["user_id"].(string) + opts.Event, _ = m["event_type"].(alertlog.Type) + opts.Start, _ = m["start_time"].(time.Time) + opts.End, _ = m["end_time"].(time.Time) + opts.SortBy, _ = m["sort_by"].(alertlog.SortBy) + opts.SortDesc, _ = m["sort_desc"].(bool) + opts.Limit, _ = m["limit"].(int) + opts.Offset, _ = m["offset"].(int) + + entries, total, err := h.c.AlertLogStore.Search(p.Context, &opts) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + custom := make([]alertLogSearchResult, len(entries)) + for i, e := range entries { + custom[i].AlertID = e.AlertID() + custom[i].Log = e.String() + custom[i].ID = e.ID() + custom[i].Timestamp = e.Timestamp() + custom[i].Subject = e.Subject() + custom[i].Event = e.Type() + } + + list.Items = custom + list.Total = total + return newScrubber(p.Context).scrub(list, err) + }, + } +} + +func (h *Handler) alertSummariesField() *g.Field { + return &g.Field{ + Name: "AlertSummaries", + Type: g.NewList(h.alertSummary), + Resolve: func(p g.ResolveParams) (interface{}, error) { + return newScrubber(p.Context).scrub(h.c.AlertStore.FindAllSummary(p.Context)) + }, + } +} + +var alertLogsSortByEnum = g.NewEnum(g.EnumConfig{ + Name: "AlertLogsSortBy", + Values: g.EnumValueConfigMap{ + "timestamp": &g.EnumValueConfig{Value: alertlog.SortByTimestamp}, + "alert_id": &g.EnumValueConfig{Value: alertlog.SortByAlertID}, + "event_type": &g.EnumValueConfig{Value: alertlog.SortByEventType}, + "user_name": &g.EnumValueConfig{Value: alertlog.SortByUserName}, + }, +}) diff --git a/graphql/alertlog.go b/graphql/alertlog.go new file mode 100644 index 0000000000..73dbbefe4d --- /dev/null +++ b/graphql/alertlog.go @@ -0,0 +1,15 @@ +package graphql + +import ( + 
g "github.com/graphql-go/graphql" +) + +func (h *Handler) alertLogFields() g.Fields { + return g.Fields{ + "alert_id": &g.Field{Type: g.String}, + "timestamp": &g.Field{Type: ISOTimestamp}, + "event": &g.Field{Type: alertLogEventType}, + "message": &g.Field{Type: g.String}, + "subject": &g.Field{Type: h.alertLogSubject}, + } +} diff --git a/graphql/assignment.go b/graphql/assignment.go new file mode 100644 index 0000000000..8ba3479560 --- /dev/null +++ b/graphql/assignment.go @@ -0,0 +1,180 @@ +package graphql + +import ( + "fmt" + "github.com/target/goalert/assignment" + "strconv" + + g "github.com/graphql-go/graphql" +) + +var assignmentSourceType = g.NewEnum(g.EnumConfig{ + Name: "AssignmentSourceType", + Values: g.EnumValueConfigMap{ + "alert": &g.EnumValueConfig{Value: assignment.SrcTypeAlert}, + "escalation_policy_step": &g.EnumValueConfig{Value: assignment.SrcTypeEscalationPolicyStep}, + "rotation_participant": &g.EnumValueConfig{Value: assignment.SrcTypeRotationParticipant}, + "schedule_rule": &g.EnumValueConfig{Value: assignment.SrcTypeScheduleRule}, + "service": &g.EnumValueConfig{Value: assignment.SrcTypeService}, + "user": &g.EnumValueConfig{Value: assignment.SrcTypeUser}, + }, +}) + +var assignmentTargetType = g.NewEnum(g.EnumConfig{ + Name: "AssignmentTargetType", + Values: g.EnumValueConfigMap{ + "escalation_policy": &g.EnumValueConfig{Value: assignment.TargetTypeEscalationPolicy}, + "notification_policy": &g.EnumValueConfig{Value: assignment.TargetTypeNotificationPolicy}, + "rotation": &g.EnumValueConfig{Value: assignment.TargetTypeRotation}, + "service": &g.EnumValueConfig{Value: assignment.TargetTypeService}, + "schedule": &g.EnumValueConfig{Value: assignment.TargetTypeSchedule}, + "user": &g.EnumValueConfig{Value: assignment.TargetTypeUser}, + }, +}) + +func targetTypeField(t assignment.TargetType) *g.Field { + return &g.Field{ + Type: assignmentTargetType, + Resolve: func(p g.ResolveParams) (interface{}, error) { return t, nil }, + } +} 
+ +func sourceTypeField(t assignment.SrcType) *g.Field { + return &g.Field{ + Type: assignmentSourceType, + Resolve: func(p g.ResolveParams) (interface{}, error) { return t, nil }, + } +} + +func (h *Handler) sourceField() *g.Field { + return &g.Field{ + Type: h.sourceType, + Resolve: func(p g.ResolveParams) (interface{}, error) { + if t, ok := p.Source.(assignment.Source); ok { + switch t.SourceType() { + case assignment.SrcTypeAlert: + id, err := strconv.Atoi(t.SourceID()) + if err != nil { + return nil, err + } + return newScrubber(p.Context).scrub(h.c.AlertStore.FindOne(p.Context, id)) + case assignment.SrcTypeEscalationPolicyStep: + return newScrubber(p.Context).scrub(h.c.EscalationStore.FindOneStep(p.Context, t.SourceID())) + case assignment.SrcTypeRotationParticipant: + return newScrubber(p.Context).scrub(h.c.RotationStore.FindParticipant(p.Context, t.SourceID())) + case assignment.SrcTypeScheduleRule: + case assignment.SrcTypeService: + return newScrubber(p.Context).scrub(h.c.ServiceStore.FindOne(p.Context, t.SourceID())) + case assignment.SrcTypeUser: + return newScrubber(p.Context).scrub(h.c.UserStore.FindOne(p.Context, t.SourceID())) + } + } + return p.Source, nil + }, + } +} + +func (h *Handler) targetField() *g.Field { + return &g.Field{ + Type: h.targetType, + Resolve: func(p g.ResolveParams) (interface{}, error) { + if t, ok := p.Source.(assignment.Target); ok { + switch t.TargetType() { + case assignment.TargetTypeEscalationPolicy: + return newScrubber(p.Context).scrub(h.c.EscalationStore.FindOnePolicy(p.Context, t.TargetID())) + case assignment.TargetTypeNotificationPolicy: + case assignment.TargetTypeRotation: + return newScrubber(p.Context).scrub(h.c.RotationStore.FindRotation(p.Context, t.TargetID())) + case assignment.TargetTypeSchedule: + return newScrubber(p.Context).scrub(h.c.ScheduleStore.FindOne(p.Context, t.TargetID())) + case assignment.TargetTypeService: + return newScrubber(p.Context).scrub(h.c.ServiceStore.FindOne(p.Context, 
t.TargetID())) + case assignment.TargetTypeUser: + return newScrubber(p.Context).scrub(h.c.UserStore.FindOne(p.Context, t.TargetID())) + } + } + return p.Source, nil + }, + } +} + +func (h *Handler) assignmentSourceFields() g.Fields { + return g.Fields{ + "source_id": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + if s, ok := p.Source.(assignment.Source); ok { + return s.SourceID(), nil + } + return nil, fmt.Errorf("invalid source type %T", p.Source) + }, + }, + "source_type": &g.Field{ + Type: assignmentSourceType, + Resolve: func(p g.ResolveParams) (interface{}, error) { + if s, ok := p.Source.(assignment.Source); ok { + return s.SourceType(), nil + } + return nil, fmt.Errorf("invalid source type %T", p.Source) + }, + }, + "source": h.sourceField(), + } +} + +func (h *Handler) assignmentTargetFields() g.Fields { + return g.Fields{ + "target_id": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + if s, ok := p.Source.(assignment.Target); ok { + return s.TargetID(), nil + } + return nil, fmt.Errorf("invalid target type %T", p.Source) + }, + }, + "target_type": &g.Field{ + Type: assignmentTargetType, + Resolve: func(p g.ResolveParams) (interface{}, error) { + if s, ok := p.Source.(assignment.Target); ok { + return s.TargetType(), nil + } + return nil, fmt.Errorf("invalid target type %T", p.Source) + }, + }, + "target": h.targetField(), + "target_name": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + var scrub = newScrubber(p.Context).scrub + if t, ok := p.Source.(assignment.TargetNamer); ok && t.TargetName() != "" { + return t.TargetName(), nil + } + + if t, ok := p.Source.(assignment.Target); ok { + switch t.TargetType() { + case assignment.TargetTypeUser: + tgt, err := h.c.UserStore.FindOne(p.Context, t.TargetID()) + if err != nil { + return scrub(nil, err) + } + return tgt.Name, nil + case assignment.TargetTypeRotation: + tgt, err := 
h.c.RotationStore.FindRotation(p.Context, t.TargetID()) + if err != nil { + return scrub(nil, err) + } + return tgt.Name, nil + case assignment.TargetTypeSchedule: + tgt, err := h.c.ScheduleStore.FindOne(p.Context, t.TargetID()) + if err != nil { + return scrub(nil, err) + } + return tgt.Name, nil + } + } + return nil, nil + }, + }, + } +} diff --git a/graphql/cache.go b/graphql/cache.go new file mode 100644 index 0000000000..c0077e2234 --- /dev/null +++ b/graphql/cache.go @@ -0,0 +1,209 @@ +package graphql + +import ( + "context" + "github.com/target/goalert/alert" + "github.com/target/goalert/escalation" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/service" + "github.com/target/goalert/user" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/user/notificationrule" + "github.com/target/goalert/util" + "strconv" +) + +func cachedConfig(c Config) Config { + cache := util.NewContextCache() + c.UserStore = &userCache{Store: c.UserStore, c: cache} + c.AlertStore = &alertCache{Store: c.AlertStore, c: cache} + c.ServiceStore = &serviceCache{Store: c.ServiceStore, c: cache} + c.EscalationStore = &escalationCache{Store: c.EscalationStore, c: cache} + c.ScheduleStore = &scheduleCache{Store: c.ScheduleStore, c: cache} + c.RotationStore = &rotationCache{Store: c.RotationStore, c: cache} + c.NRStore = &nrCache{Store: c.NRStore, c: cache} + c.CMStore = &cmCache{Store: c.CMStore, c: cache} + return c +} + +type nrCache struct { + notificationrule.Store + c util.ContextCache +} + +func (n *nrCache) FindOne(ctx context.Context, id string) (*notificationrule.NotificationRule, error) { + v, err := n.c.LoadOrStore(ctx, "nr_find_one:"+id, func() (interface{}, error) { + return n.Store.FindOne(ctx, id) + }) + if err != nil { + return nil, err + } + return v.(*notificationrule.NotificationRule), nil +} +func (n *nrCache) FindAll(ctx context.Context, userID string) 
([]notificationrule.NotificationRule, error) { + v, err := n.c.LoadOrStore(ctx, "nr_find_all:"+userID, func() (interface{}, error) { + return n.Store.FindAll(ctx, userID) + }) + if err != nil { + return nil, err + } + return v.([]notificationrule.NotificationRule), nil +} + +type cmCache struct { + contactmethod.Store + c util.ContextCache +} + +func (c *cmCache) FindOne(ctx context.Context, id string) (*contactmethod.ContactMethod, error) { + v, err := c.c.LoadOrStore(ctx, "cm_find_one:"+id, func() (interface{}, error) { + return c.Store.FindOne(ctx, id) + }) + if err != nil { + return nil, err + } + return v.(*contactmethod.ContactMethod), nil +} +func (c *cmCache) FindAll(ctx context.Context, userID string) ([]contactmethod.ContactMethod, error) { + v, err := c.c.LoadOrStore(ctx, "cm_find_all:"+userID, func() (interface{}, error) { + return c.Store.FindAll(ctx, userID) + }) + if err != nil { + return nil, err + } + return v.([]contactmethod.ContactMethod), nil +} + +type rotationCache struct { + rotation.Store + c util.ContextCache +} + +func (r *rotationCache) FindAllRotations(ctx context.Context) ([]rotation.Rotation, error) { + v, err := r.c.LoadOrStore(ctx, "rotation_find_all", func() (interface{}, error) { + return r.Store.FindAllRotations(ctx) + }) + if err != nil { + return nil, err + } + return v.([]rotation.Rotation), nil +} +func (r *rotationCache) FindRotation(ctx context.Context, id string) (*rotation.Rotation, error) { + v, err := r.c.LoadOrStore(ctx, "rotation_find_one:"+id, func() (interface{}, error) { + return r.Store.FindRotation(ctx, id) + }) + if err != nil { + return nil, err + } + return v.(*rotation.Rotation), nil +} +func (r *rotationCache) FindParticipant(ctx context.Context, id string) (*rotation.Participant, error) { + v, err := r.c.LoadOrStore(ctx, "rotation_find_one_participant:"+id, func() (interface{}, error) { + return r.Store.FindParticipant(ctx, id) + }) + if err != nil { + return nil, err + } + return v.(*rotation.Participant), 
nil +} +func (r *rotationCache) FindAllParticipants(ctx context.Context, rotID string) ([]rotation.Participant, error) { + v, err := r.c.LoadOrStore(ctx, "rotation_find_all_participants:"+rotID, func() (interface{}, error) { + return r.Store.FindAllParticipants(ctx, rotID) + }) + if err != nil { + return nil, err + } + return v.([]rotation.Participant), nil +} + +type scheduleCache struct { + schedule.Store + c util.ContextCache +} + +func (s *scheduleCache) FindOne(ctx context.Context, id string) (*schedule.Schedule, error) { + v, err := s.c.LoadOrStore(ctx, "schedule_find_one:"+id, func() (interface{}, error) { + return s.Store.FindOne(ctx, id) + }) + if err != nil { + return nil, err + } + return v.(*schedule.Schedule), nil +} + +type escalationCache struct { + escalation.Store + c util.ContextCache +} + +func (s *escalationCache) FindOnePolicy(ctx context.Context, id string) (*escalation.Policy, error) { + v, err := s.c.LoadOrStore(ctx, "escalation_find_one_policy:"+id, func() (interface{}, error) { + return s.Store.FindOnePolicy(ctx, id) + }) + if err != nil { + return nil, err + } + return v.(*escalation.Policy), nil +} +func (s *escalationCache) FindOneStep(ctx context.Context, id string) (*escalation.Step, error) { + v, err := s.c.LoadOrStore(ctx, "escalation_find_one_step:"+id, func() (interface{}, error) { + return s.Store.FindOneStep(ctx, id) + }) + if err != nil { + return nil, err + } + return v.(*escalation.Step), nil +} +func (s *escalationCache) FindAllSteps(ctx context.Context, id string) ([]escalation.Step, error) { + v, err := s.c.LoadOrStore(ctx, "escalation_find_all_steps:"+id, func() (interface{}, error) { + return s.Store.FindAllSteps(ctx, id) + }) + if err != nil { + return nil, err + } + return v.([]escalation.Step), nil +} + +type serviceCache struct { + service.Store + c util.ContextCache +} + +func (s *serviceCache) FindOne(ctx context.Context, id string) (*service.Service, error) { + v, err := s.c.LoadOrStore(ctx, 
"service_fine_one:"+id, func() (interface{}, error) { + return s.Store.FindOne(ctx, id) + }) + if err != nil { + return nil, err + } + return v.(*service.Service), nil +} + +type alertCache struct { + alert.Store + c util.ContextCache +} + +func (a *alertCache) FindOne(ctx context.Context, id int) (*alert.Alert, error) { + v, err := a.c.LoadOrStore(ctx, "alert_find_one:"+strconv.Itoa(id), func() (interface{}, error) { + return a.Store.FindOne(ctx, id) + }) + if err != nil { + return nil, err + } + return v.(*alert.Alert), nil +} + +type userCache struct { + user.Store + c util.ContextCache +} + +func (u *userCache) FindOne(ctx context.Context, id string) (*user.User, error) { + v, err := u.c.LoadOrStore(ctx, "user_find_one:"+id, func() (interface{}, error) { return u.Store.FindOne(ctx, id) }) + if err != nil { + return nil, err + } + return v.(*user.User), nil +} diff --git a/graphql/config.go b/graphql/config.go new file mode 100644 index 0000000000..1d565cf7b0 --- /dev/null +++ b/graphql/config.go @@ -0,0 +1,55 @@ +package graphql + +import ( + "database/sql" + "github.com/target/goalert/alert" + alertlog "github.com/target/goalert/alert/log" + "github.com/target/goalert/engine/resolver" + "github.com/target/goalert/escalation" + "github.com/target/goalert/heartbeat" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/label" + "github.com/target/goalert/limit" + "github.com/target/goalert/notification" + "github.com/target/goalert/oncall" + "github.com/target/goalert/override" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/schedule/shiftcalc" + "github.com/target/goalert/service" + "github.com/target/goalert/user" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/user/favorite" + "github.com/target/goalert/user/notificationrule" +) + +type 
Config struct { + DB *sql.DB + + AlertStore alert.Store + AlertLogStore alertlog.Store + UserStore user.Store + CMStore contactmethod.Store + NRStore notificationrule.Store + ServiceStore service.Store + + ScheduleStore schedule.Store + ScheduleRuleStore rule.Store + RotationStore rotation.Store + ShiftCalc shiftcalc.Calculator + + EscalationStore escalation.Store + IntegrationKeyStore integrationkey.Store + HeartbeatStore heartbeat.Store + + LimitStore limit.Store + + OverrideStore override.Store + + Resolver resolver.Resolver + NotificationStore notification.Store + UserFavoriteStore favorite.Store + LabelStore label.Store + OnCallStore oncall.Store +} diff --git a/graphql/contactmethod.go b/graphql/contactmethod.go new file mode 100644 index 0000000000..0fe585f82a --- /dev/null +++ b/graphql/contactmethod.go @@ -0,0 +1,246 @@ +package graphql + +import ( + "errors" + "github.com/target/goalert/user/contactmethod" + + g "github.com/graphql-go/graphql" +) + +func (h *Handler) CMFields() g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + "name": &g.Field{Type: g.String}, + "type": &g.Field{Type: g.String}, + "value": &g.Field{Type: g.String}, + "disabled": &g.Field{Type: g.Boolean}, + } +} + +var contactType = g.NewEnum(g.EnumConfig{ + Name: "ContactType", + Values: g.EnumValueConfigMap{ + "VOICE": &g.EnumValueConfig{Value: contactmethod.TypeVoice}, + "SMS": &g.EnumValueConfig{Value: contactmethod.TypeSMS}, + "EMAIL": &g.EnumValueConfig{Value: contactmethod.TypeEmail}, + "PUSH": &g.EnumValueConfig{Value: contactmethod.TypePush}, + }, +}) + +func (h *Handler) updateContactMethodField() *g.Field { + return &g.Field{ + Type: h.contactMethod, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "UpdateContactMethodInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "name": 
&g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "type": &g.InputObjectFieldConfig{Type: g.NewNonNull(contactType)}, + "value": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "disabled": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var c contactmethod.ContactMethod + c.ID, _ = m["id"].(string) + c.Name, _ = m["name"].(string) + c.Type, _ = m["type"].(contactmethod.Type) + c.Value, _ = m["value"].(string) + c.Disabled, _ = m["disabled"].(bool) + + err := h.c.CMStore.Update(p.Context, &c) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + return newScrubber(p.Context).scrub(h.c.CMStore.FindOne(p.Context, c.ID)) + }, + } +} + +func (h *Handler) createContactMethodField() *g.Field { + return &g.Field{ + Type: h.contactMethod, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "CreateContactMethodInput", + Fields: g.InputObjectConfigFieldMap{ + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "type": &g.InputObjectFieldConfig{Type: g.NewNonNull(contactType)}, + "value": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "disabled": &g.InputObjectFieldConfig{Type: g.Boolean}, + "user_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var c contactmethod.ContactMethod + c.Name, _ = m["name"].(string) + c.Type, _ = m["type"].(contactmethod.Type) + c.Value, _ = m["value"].(string) + c.Disabled, _ = m["disabled"].(bool) + c.UserID, _ = m["user_id"].(string) + + return 
newScrubber(p.Context).scrub(h.c.CMStore.Insert(p.Context, &c)) + }, + } +} + +func (h *Handler) deleteContactMethodField() *g.Field { + return &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "DeleteContactMethodOutput", + Fields: g.Fields{"deleted_id": &g.Field{Type: g.String}}, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteContactMethodInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var c struct { + ID string `json:"deleted_id"` + } + + c.ID, _ = m["id"].(string) + + err := h.c.CMStore.Delete(p.Context, c.ID) + return newScrubber(p.Context).scrub(&c, err) + }, + } +} + +func (h *Handler) sendContactMethodTest() *g.Field { + return &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "SendContactMethodTest", + Fields: g.Fields{"id": &g.Field{Type: g.String}}, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "SendContactMethodTestInput", + Fields: g.InputObjectConfigFieldMap{ + "contact_method_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var result struct { + CMID string `json:"id"` + } + result.CMID, _ = m["contact_method_id"].(string) + + err := h.c.NotificationStore.SendContactMethodTest(p.Context, result.CMID) + return newScrubber(p.Context).scrub(result, err) + }, + } +} + +func (h *Handler) sendContactMethodVerification() *g.Field { + return &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: 
"SendContactMethodVerification", + Fields: g.Fields{"id": &g.Field{Type: g.String}}, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "SendContactMethodVerificationInput", + Fields: g.InputObjectConfigFieldMap{ + "contact_method_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "resend": &g.InputObjectFieldConfig{Type: g.Boolean}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + resend, _ := m["resend"].(bool) + var result struct { + CMID string `json:"id"` + } + result.CMID, _ = m["contact_method_id"].(string) + + err := h.c.NotificationStore.SendContactMethodVerification(p.Context, result.CMID, resend) + return newScrubber(p.Context).scrub(result, err) + }, + } +} + +func (h *Handler) verifyContactMethod() *g.Field { + return &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "VerifyContactMethodOutput", + Fields: g.Fields{ + "contact_method_ids": &g.Field{Type: g.NewList(g.String), Description: "IDs of contact methods that have been enabled by this operation."}}, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "VerifyContactMethodInput", + Fields: g.InputObjectConfigFieldMap{ + "verification_code": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + "contact_method_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + id, _ := m["contact_method_id"].(string) + var code int + code, _ = m["verification_code"].(int) + + changed, err := h.c.NotificationStore.VerifyContactMethod(p.Context, id, code) + var result struct { + IDs []string 
`json:"contact_method_ids"` + } + result.IDs = changed + return newScrubber(p.Context).scrub(result, err) + + }, + } +} diff --git a/graphql/createall.go b/graphql/createall.go new file mode 100644 index 0000000000..601dd445a7 --- /dev/null +++ b/graphql/createall.go @@ -0,0 +1,552 @@ +package graphql + +import ( + "github.com/target/goalert/assignment" + "github.com/target/goalert/escalation" + "github.com/target/goalert/heartbeat" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/override" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/service" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "time" + + g "github.com/graphql-go/graphql" +) + +func parseUO(_m interface{}) (*override.UserOverride, error) { + m, ok := _m.(map[string]interface{}) + if !ok { + return nil, nil + } + var o override.UserOverride + o.AddUserID, _ = m["add_user_id"].(string) + o.RemoveUserID, _ = m["remove_user_id"].(string) + o.Target = parseTarget(m) + + sTime, _ := m["start_time"].(string) + var err error + o.Start, err = time.Parse(time.RFC3339, sTime) + if err != nil { + return nil, validation.NewFieldError("start_time", "invalid format for time value: "+err.Error()) + } + eTime, _ := m["end_time"].(string) + o.End, err = time.Parse(time.RFC3339, eTime) + if err != nil { + return nil, validation.NewFieldError("end_time", "invalid format for time value: "+err.Error()) + } + + return &o, nil +} + +func parseEP(_m interface{}) *escalation.Policy { + m, ok := _m.(map[string]interface{}) + if !ok { + return nil + } + + var ep escalation.Policy + ep.ID, _ = m["id_placeholder"].(string) + ep.Name, _ = m["name"].(string) + ep.Description, _ = m["description"].(string) + ep.Repeat, _ = m["repeat"].(int) + + return &ep +} + +func parseEPStep(_m interface{}) *escalation.Step { + m, 
ok := _m.(map[string]interface{}) + if !ok { + return nil + } + + var step escalation.Step + step.DelayMinutes, _ = m["delay_minutes"].(int) + step.PolicyID, _ = m["escalation_policy_id"].(string) + + tgts, _ := m["targets"].([]interface{}) + for _, t := range tgts { + tgt := parseTarget(t) + if tgt == nil { + continue + } + + step.Targets = append(step.Targets, tgt) + } + + return &step +} + +func parseService(_m interface{}) *service.Service { + m, ok := _m.(map[string]interface{}) + if !ok { + return nil + } + + var s service.Service + s.ID, _ = m["id_placeholder"].(string) + s.Name, _ = m["name"].(string) + s.Description, _ = m["description"].(string) + s.EscalationPolicyID, _ = m["escalation_policy_id"].(string) + + return &s +} + +func parseHeartbeatMonitor(_m interface{}) *heartbeat.Monitor { + m, ok := _m.(map[string]interface{}) + if !ok { + return nil + } + + var hb heartbeat.Monitor + hb.Name, _ = m["name"].(string) + hb.IntervalMinutes, _ = m["interval_minutes"].(int) + hb.ServiceID, _ = m["service_id"].(string) + + return &hb +} + +func parseIntegrationKey(_m interface{}) *integrationkey.IntegrationKey { + m, ok := _m.(map[string]interface{}) + if !ok { + return nil + } + + var key integrationkey.IntegrationKey + key.Name, _ = m["name"].(string) + key.Type, _ = m["type"].(integrationkey.Type) + key.ServiceID, _ = m["service_id"].(string) + + return &key +} + +func parseRotation(_m interface{}) (*rotation.Rotation, error) { + m, ok := _m.(map[string]interface{}) + if !ok { + return nil, validation.NewFieldError("input", "invalid input object") + } + + var rot rotation.Rotation + rot.ID, _ = m["id_placeholder"].(string) + rot.Name, _ = m["name"].(string) + rot.Description, _ = m["description"].(string) + sTime, _ := m["start"].(string) + var err error + rot.Start, err = time.Parse(time.RFC3339, sTime) + if err != nil { + return nil, validation.NewFieldError("start", "invalid format for time value: "+err.Error()) + } + tz, _ := m["time_zone"].(string) + 
if tz == "" { + return nil, validation.NewFieldError("time_zone", "must not be empty") + } + loc, err := time.LoadLocation(tz) + if err != nil { + return nil, validation.NewFieldError("time_zone", err.Error()) + } + rot.Start = rot.Start.In(loc) + rot.Type = m["type"].(rotation.Type) + rot.ShiftLength = m["shift_length"].(int) + + return &rot, nil +} + +func parseRotationPart(_m interface{}) (*rotation.Participant, error) { + m, ok := _m.(map[string]interface{}) + if !ok { + return nil, validation.NewFieldError("input", "invalid input object") + } + + var rp rotation.Participant + rp.RotationID, _ = m["rotation_id"].(string) + rp.Target = assignment.UserTarget(m["user_id"].(string)) + + return &rp, nil +} + +func parseSched(_m interface{}) (*schedule.Schedule, error) { + m, ok := _m.(map[string]interface{}) + if !ok { + return nil, validation.NewFieldError("input", "invalid input object") + } + + var s schedule.Schedule + s.ID, _ = m["id_placeholder"].(string) + s.Name, _ = m["name"].(string) + s.Description, _ = m["description"].(string) + + tz, _ := m["time_zone"].(string) + if tz == "" { + return nil, validation.NewFieldError("time_zone", "must not be empty") + } + + loc, err := time.LoadLocation(tz) + if err != nil { + return nil, validation.NewFieldError("time_zone", err.Error()) + } + + s.TimeZone = loc + + return &s, nil +} + +func parseSchedRule(_m interface{}) (*rule.Rule, error) { + m, ok := _m.(map[string]interface{}) + if !ok { + return nil, validation.NewFieldError("input", "invalid input object") + } + + var r rule.Rule + r.ScheduleID = m["schedule_id"].(string) + + var e bool + e, _ = m["sunday"].(bool) + r.SetDay(time.Sunday, e) + e, _ = m["monday"].(bool) + r.SetDay(time.Monday, e) + e, _ = m["tuesday"].(bool) + r.SetDay(time.Tuesday, e) + e, _ = m["wednesday"].(bool) + r.SetDay(time.Wednesday, e) + e, _ = m["thursday"].(bool) + r.SetDay(time.Thursday, e) + e, _ = m["friday"].(bool) + r.SetDay(time.Friday, e) + e, _ = m["saturday"].(bool) + 
r.SetDay(time.Saturday, e) + + startStr, _ := m["start"].(string) + endStr, _ := m["end"].(string) + var err error + r.Start, err = rule.ParseClock(startStr) + if err != nil { + return nil, validation.NewFieldError("start", err.Error()) + } + r.End, err = rule.ParseClock(endStr) + if err != nil { + return nil, validation.NewFieldError("end", err.Error()) + } + + r.Target = parseTarget(m["target"]) + + return &r, nil +} + +func parseTarget(_m interface{}) assignment.Target { + m, ok := _m.(map[string]interface{}) + if !ok { + return nil + } + var raw assignment.RawTarget + raw.ID, _ = m["target_id"].(string) + raw.Type, _ = m["target_type"].(assignment.TargetType) + + return &raw +} + +/* + * Creates a service, userTarget, rotation, or schedule, an escalation policy, + * and adds a step from the user, rot, or sched created. finally, generates an + * integration key to return + */ +func (h *Handler) createAllField() *g.Field { + return &g.Field{ + Type: h.createAll, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewNonNull(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllInput", + Description: "Creates up to any number of escalation policies, steps, services, integration keys," + + "rotations, participants, schedules, and schedule rules.", + Fields: g.InputObjectConfigFieldMap{ + "escalation_policies": &g.InputObjectFieldConfig{ + Type: g.NewList(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllEscalationPolicyInput", + Fields: g.InputObjectConfigFieldMap{ + "id_placeholder": &g.InputObjectFieldConfig{Type: g.String}, + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "description": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "repeat": &g.InputObjectFieldConfig{Type: g.Int}, + }, + })), + }, + "escalation_policy_steps": &g.InputObjectFieldConfig{ + Type: g.NewList(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllEscalationPolicyStepInput", + Fields: g.InputObjectConfigFieldMap{ + 
"escalation_policy_id": &g.InputObjectFieldConfig{ + Description: "The UUID of an existing policy or the value of an id_placeholder from the current request.", + Type: g.NewNonNull(g.String), + }, + "delay_minutes": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + "targets": &g.InputObjectFieldConfig{ + Type: g.NewList(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllEPStepTargetInput", + Fields: g.InputObjectConfigFieldMap{ + "target_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "target_type": &g.InputObjectFieldConfig{Type: g.NewNonNull(epStepTarget)}, + }, + })), + }, + }})), + }, + "services": &g.InputObjectFieldConfig{ + Type: g.NewList(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllServiceInput", + Fields: g.InputObjectConfigFieldMap{ + "escalation_policy_id": &g.InputObjectFieldConfig{ + Description: "The UUID of an existing policy or the value of an id_placeholder from the current request.", + Type: g.NewNonNull(g.String), + }, + "id_placeholder": &g.InputObjectFieldConfig{Type: g.String}, + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "description": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + })), + }, + "integration_keys": &g.InputObjectFieldConfig{ + Type: g.NewList(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllIntegrationKeyInput", + Fields: g.InputObjectConfigFieldMap{ + "service_id": &g.InputObjectFieldConfig{ + Description: "The UUID of an existing service or the value of an id_placeholder from the current request.", + Type: g.NewNonNull(g.String), + }, + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "type": &g.InputObjectFieldConfig{Type: g.NewNonNull(integrationKeyType)}, + }, + })), + }, + "heartbeat_monitors": &g.InputObjectFieldConfig{ + Type: g.NewList(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllHeartbeatMonitorInput", + Fields: g.InputObjectConfigFieldMap{ + "service_id": &g.InputObjectFieldConfig{ + Description: "The 
UUID of an existing service or the value of an id_placeholder from the current request.", + Type: g.NewNonNull(g.String), + }, + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "interval_minutes": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + }, + })), + }, + "rotations": &g.InputObjectFieldConfig{ + Type: g.NewList(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllRotationInput", + Fields: g.InputObjectConfigFieldMap{ + "id_placeholder": &g.InputObjectFieldConfig{Type: g.String}, + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "description": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "time_zone": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "type": &g.InputObjectFieldConfig{Type: g.NewNonNull(rotationTypeEnum)}, + "start": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "shift_length": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + }, + })), + }, + "rotation_participants": &g.InputObjectFieldConfig{ + Type: g.NewList(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllRotationParticipantInput", + Fields: g.InputObjectConfigFieldMap{ + "rotation_id": &g.InputObjectFieldConfig{ + Description: "The UUID of an existing rotation or the value of an id_placeholder from the current request.", + Type: g.NewNonNull(g.String), + }, + "user_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + })), + }, + "schedules": &g.InputObjectFieldConfig{ + Type: g.NewList(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllScheduleInput", + Fields: g.InputObjectConfigFieldMap{ + "id_placeholder": &g.InputObjectFieldConfig{Type: g.String}, + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "description": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "time_zone": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + })), + }, + "user_overrides": &g.InputObjectFieldConfig{ + Type: 
g.NewList(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllUserOverrideInput", + Fields: g.InputObjectConfigFieldMap{ + "target_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "target_type": &g.InputObjectFieldConfig{Type: g.NewNonNull(userOverrideTargetType)}, + "start_time": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "end_time": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "add_user_id": &g.InputObjectFieldConfig{Type: g.String}, + "remove_user_id": &g.InputObjectFieldConfig{Type: g.String}, + }, + })), + }, + "schedule_rules": &g.InputObjectFieldConfig{ + Type: g.NewList(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllScheduleRuleInput", + Fields: g.InputObjectConfigFieldMap{ + "schedule_id": &g.InputObjectFieldConfig{ + Description: "The UUID of an existing schedule or the value of an id_placeholder from the current request.", + Type: g.NewNonNull(g.String), + }, + "sunday": &g.InputObjectFieldConfig{Type: g.Boolean}, + "monday": &g.InputObjectFieldConfig{Type: g.Boolean}, + "tuesday": &g.InputObjectFieldConfig{Type: g.Boolean}, + "wednesday": &g.InputObjectFieldConfig{Type: g.Boolean}, + "thursday": &g.InputObjectFieldConfig{Type: g.Boolean}, + "friday": &g.InputObjectFieldConfig{Type: g.Boolean}, + "saturday": &g.InputObjectFieldConfig{Type: g.Boolean}, + "start": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "end": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "target": &g.InputObjectFieldConfig{ + Type: g.NewNonNull(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateAllScheduleRuleTargetInput", + Fields: g.InputObjectConfigFieldMap{ + "target_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "target_type": &g.InputObjectFieldConfig{Type: g.NewNonNull(schedRuleTarget)}, + }, + })), + }, + }})), + }, + }, + })), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, 
validation.NewFieldError("input", "expected object") + } + + scrub := newScrubber(p.Context).scrub + const limitTotal = 35 + var count int + var err error + + // parse everything + getSlice := func(s string) []interface{} { + v, _ := m[s].([]interface{}) + + count += len(v) + if count > limitTotal { + v = v[:0] + err = validate.Many(err, validation.NewFieldError(s, "too many items")) + } + + return v + } + + var data createAllData + + for _, v := range getSlice("escalation_policies") { + ep := parseEP(v) + if ep != nil { + data.EscalationPolicies = append(data.EscalationPolicies, *ep) + } + } + + for _, v := range getSlice("escalation_policy_steps") { + step := parseEPStep(v) + if step != nil { + data.EscalationPolicySteps = append(data.EscalationPolicySteps, *step) + } + } + + for _, v := range getSlice("services") { + serv := parseService(v) + if serv != nil { + data.Services = append(data.Services, *serv) + } + } + + for _, v := range getSlice("integration_keys") { + key := parseIntegrationKey(v) + if key != nil { + data.IntegrationKeys = append(data.IntegrationKeys, *key) + } + } + + for _, v := range getSlice("rotations") { + rot, err := parseRotation(v) + if err != nil { + return scrub(nil, err) + } + if rot != nil { + data.Rotations = append(data.Rotations, *rot) + } + } + + for _, v := range getSlice("rotation_participants") { + rp, err := parseRotationPart(v) + if err != nil { + return scrub(nil, err) + } + if rp != nil { + data.RotationParticipants = append(data.RotationParticipants, *rp) + } + } + + for _, v := range getSlice("schedules") { + sched, err := parseSched(v) + if err != nil { + return scrub(nil, err) + } + if sched != nil { + data.Schedules = append(data.Schedules, *sched) + } + } + + for _, v := range getSlice("user_overrides") { + o, err := parseUO(v) + if err != nil { + return scrub(nil, err) + } + if o != nil { + data.UserOverrides = append(data.UserOverrides, *o) + } + } + + for _, v := range getSlice("schedule_rules") { + r, err := 
parseSchedRule(v) + if err != nil { + return scrub(nil, err) + } + if r != nil { + data.ScheduleRules = append(data.ScheduleRules, *r) + } + } + + for _, v := range getSlice("heartbeat_monitors") { + r := parseHeartbeatMonitor(v) + if r != nil { + data.HeartbeatMonitors = append(data.HeartbeatMonitors, *r) + } + } + + if err != nil { + return nil, err + } + + // create & return everything + return scrub(h.c.createAll(p.Context, &data)) + }, + } +} + +func (h *Handler) createAllFields() g.Fields { + return g.Fields{ + "escalation_policies": &g.Field{Type: g.NewList(h.escalationPolicy)}, + "escalation_policy_steps": &g.Field{Type: g.NewList(h.escalationPolicyStep)}, + "services": &g.Field{Type: g.NewList(h.service)}, + "integration_keys": &g.Field{Type: g.NewList(h.integrationKey)}, + "rotations": &g.Field{Type: g.NewList(h.rotation)}, + "rotation_participants": &g.Field{Type: g.NewList(h.rotationParticipant)}, + "schedules": &g.Field{Type: g.NewList(h.schedule)}, + "schedule_rules": &g.Field{Type: g.NewList(h.scheduleRule)}, + "heartbeat_monitors": &g.Field{Type: g.NewList(h.heartbeat)}, + "user_overrides": &g.Field{Type: g.NewList(h.userOverride)}, + } +} diff --git a/graphql/createallutil.go b/graphql/createallutil.go new file mode 100644 index 0000000000..27e76a0435 --- /dev/null +++ b/graphql/createallutil.go @@ -0,0 +1,206 @@ +package graphql + +import ( + "context" + "github.com/target/goalert/assignment" + "github.com/target/goalert/escalation" + "github.com/target/goalert/heartbeat" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/override" + "github.com/target/goalert/permission" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/service" + "github.com/target/goalert/validation" +) + +type createAllData struct { + EscalationPolicies []escalation.Policy `json:"escalation_policies"` + 
EscalationPolicySteps []escalation.Step `json:"escalation_policy_steps"` + Services []service.Service `json:"services"` + IntegrationKeys []integrationkey.IntegrationKey `json:"integration_keys"` + Rotations []rotation.Rotation `json:"rotations"` + RotationParticipants []rotation.Participant `json:"rotation_participants"` + Schedules []schedule.Schedule `json:"schedules"` + ScheduleRules []rule.Rule `json:"schedule_rules"` + HeartbeatMonitors []heartbeat.Monitor `json:"heartbeat_monitors"` + UserOverrides []override.UserOverride `json:"user_overrides"` +} + +func (c *Config) createAll(ctx context.Context, data *createAllData) (*createAllData, error) { + ids := make(map[string]string) + setID := func(s, v string) error { + if s == "" { + return nil + } + if _, ok := ids[s]; ok { + return validation.NewFieldError("duplicate value '%s'", s) + } + ids[s] = v + return nil + } + setID("__current_user", permission.UserID(ctx)) + + getID := func(s string) string { + if s == "" { + return "" + } + id, ok := ids[s] + if ok { + return id + } + return s + } + + getTarget := func(tgt assignment.Target) assignment.Target { + id, ok := ids[tgt.TargetID()] + if ok { + return &assignment.RawTarget{ + ID: id, + Type: tgt.TargetType(), + } + } + return tgt + } + + tx, err := c.DB.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + defer tx.Rollback() + + var result createAllData + + // create escalation policies + for _, ep := range data.EscalationPolicies { + newEP, err := c.EscalationStore.CreatePolicyTx(ctx, tx, &ep) + if err != nil { + return nil, err + } + err = setID(ep.ID, newEP.ID) + if err != nil { + return nil, err + } + result.EscalationPolicies = append(result.EscalationPolicies, *newEP) + } + + // create services + for _, serv := range data.Services { + serv.EscalationPolicyID = getID(serv.EscalationPolicyID) + newServ, err := c.ServiceStore.CreateServiceTx(ctx, tx, &serv) + if err != nil { + return nil, err + } + err = setID(serv.ID, newServ.ID) + if err != nil 
{ + return nil, err + } + result.Services = append(result.Services, *newServ) + } + + // create integration keys + for _, key := range data.IntegrationKeys { + key.ServiceID = getID(key.ServiceID) + newKey, err := c.IntegrationKeyStore.CreateKeyTx(ctx, tx, &key) + if err != nil { + return nil, err + } + result.IntegrationKeys = append(result.IntegrationKeys, *newKey) + } + + // create heartbeat monitors + for _, hb := range data.HeartbeatMonitors { + hb.ServiceID = getID(hb.ServiceID) + newHB, err := c.HeartbeatStore.CreateTx(ctx, tx, &hb) + if err != nil { + return nil, err + } + result.HeartbeatMonitors = append(result.HeartbeatMonitors, *newHB) + } + + // create rotations + for _, rot := range data.Rotations { + newRot, err := c.RotationStore.CreateRotationTx(ctx, tx, &rot) + if err != nil { + return nil, err + } + err = setID(rot.ID, newRot.ID) + if err != nil { + return nil, err + } + result.Rotations = append(result.Rotations, *newRot) + } + + // add rotation participants + for _, rp := range data.RotationParticipants { + rp.RotationID = getID(rp.RotationID) + rp.Target = getTarget(rp.Target) + + newRP, err := c.RotationStore.AddParticipantTx(ctx, tx, &rp) + if err != nil { + return nil, err + } + if err != nil { + return nil, err + } + result.RotationParticipants = append(result.RotationParticipants, *newRP) + } + + // create schedules + for _, sched := range data.Schedules { + newSched, err := c.ScheduleStore.CreateScheduleTx(ctx, tx, &sched) + if err != nil { + return nil, err + } + err = setID(sched.ID, newSched.ID) + if err != nil { + return nil, err + } + result.Schedules = append(result.Schedules, *newSched) + } + + // create user overrides + for _, o := range data.UserOverrides { + o.AddUserID = getID(o.AddUserID) + o.RemoveUserID = getID(o.RemoveUserID) + o.Target = getTarget(o.Target) + newO, err := c.OverrideStore.CreateUserOverrideTx(ctx, tx, &o) + if err != nil { + return nil, err + } + result.UserOverrides = append(result.UserOverrides, *newO) + 
} + + // create rules for schedule(s, depending on placeholder ids) + for _, r := range data.ScheduleRules { + r.ScheduleID = getID(r.ScheduleID) + r.Target = getTarget(r.Target) + + newRule, err := c.ScheduleRuleStore.CreateRuleTx(ctx, tx, &r) + if err != nil { + return nil, err + } + result.ScheduleRules = append(result.ScheduleRules, *newRule) + } + + // create steps for escalation policy(s) + for _, step := range data.EscalationPolicySteps { + step.PolicyID = getID(step.PolicyID) + newStep, err := c.EscalationStore.CreateStepTx(ctx, tx, &step) + if err != nil { + return nil, err + } + result.EscalationPolicySteps = append(result.EscalationPolicySteps, *newStep) + + for _, tgt := range step.Targets { + tgt = getTarget(tgt) + err = c.EscalationStore.AddStepTargetTx(ctx, tx, newStep.ID, tgt) + if err != nil { + return nil, err + } + } + } + + return &result, tx.Commit() +} diff --git a/graphql/deleteall.go b/graphql/deleteall.go new file mode 100644 index 0000000000..fe593262bc --- /dev/null +++ b/graphql/deleteall.go @@ -0,0 +1,96 @@ +package graphql + +import ( + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + + g "github.com/graphql-go/graphql" +) + +func (h *Handler) deleteAllField() *g.Field { + return &g.Field{ + Type: h.deleteAll, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewNonNull(g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteAllInput", + Description: "Deletes up to any number of escalation policies, steps, services, integration keys," + + "rotations, participants, schedules, and schedule rules.", + Fields: g.InputObjectConfigFieldMap{ + "escalation_policy_ids": &g.InputObjectFieldConfig{Type: g.NewList(g.String)}, + "escalation_policy_step_ids": &g.InputObjectFieldConfig{Type: g.NewList(g.String)}, + "service_ids": &g.InputObjectFieldConfig{Type: g.NewList(g.String)}, + "integration_key_ids": &g.InputObjectFieldConfig{Type: g.NewList(g.String)}, + "rotation_ids": 
&g.InputObjectFieldConfig{Type: g.NewList(g.String)}, + "rotation_participant_ids": &g.InputObjectFieldConfig{Type: g.NewList(g.String)}, + "schedule_ids": &g.InputObjectFieldConfig{Type: g.NewList(g.String)}, + "schedule_rule_ids": &g.InputObjectFieldConfig{Type: g.NewList(g.String)}, + "heartbeat_monitor_ids": &g.InputObjectFieldConfig{Type: g.NewList(g.String)}, + "user_override_ids": &g.InputObjectFieldConfig{Type: g.NewList(g.String)}, + }, + })), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, validation.NewFieldError("input", "expected object") + } + + scrub := newScrubber(p.Context).scrub + const limitTotal = 35 + var count int + var err error + + // parse everything + getSlice := func(s string) []string { + v, _ := m[s].([]interface{}) + + count += len(v) + if count > limitTotal { + v = v[:0] + err = validate.Many(err, validation.NewFieldError(s, "too many items")) + } + var strs []string + for _, i := range v { + if str, ok := i.(string); ok { + strs = append(strs, str) + } else { + err = validate.Many(err, validation.NewFieldError(s, "expected string")) + } + } + + return strs + } + + var data deleteAllData + data.EscalationPolicyIDs = getSlice("escalation_policy_ids") + data.EscalationPolicyStepIDs = getSlice("escalation_policy_step_ids") + data.ServiceIDs = getSlice("service_ids") + data.IntegrationKeyIDs = getSlice("integration_key_ids") + data.RotationIDs = getSlice("rotation_ids") + data.RotationParticipantIDs = getSlice("rotation_participant_ids") + data.ScheduleIDs = getSlice("schedule_ids") + data.ScheduleRuleIDs = getSlice("schedule_rule_ids") + data.HeartbeatMonitorIDs = getSlice("heartbeat_monitor_ids") + data.UserOverrideIDs = getSlice("user_override_ids") + + return scrub(data, h.c.deleteAll(p.Context, &data)) + }, + } +} + +func (h *Handler) deleteAllFields() g.Fields { + return g.Fields{ + "escalation_policy_ids": &g.Field{Type: 
g.NewList(g.String)}, + "escalation_policy_step_ids": &g.Field{Type: g.NewList(g.String)}, + "service_ids": &g.Field{Type: g.NewList(g.String)}, + "integration_key_ids": &g.Field{Type: g.NewList(g.String)}, + "rotation_ids": &g.Field{Type: g.NewList(g.String)}, + "rotation_participant_ids": &g.Field{Type: g.NewList(g.String)}, + "schedule_ids": &g.Field{Type: g.NewList(g.String)}, + "schedule_rule_ids": &g.Field{Type: g.NewList(g.String)}, + "heartbeat_monitor_ids": &g.Field{Type: g.NewList(g.String)}, + "user_override_ids": &g.Field{Type: g.NewList(g.String)}, + } +} diff --git a/graphql/deleteallutil.go b/graphql/deleteallutil.go new file mode 100644 index 0000000000..783c6114f9 --- /dev/null +++ b/graphql/deleteallutil.go @@ -0,0 +1,64 @@ +package graphql + +import ( + "context" + "database/sql" +) + +type deleteAllData struct { + EscalationPolicyIDs []string `json:"escalation_policie_ids"` + EscalationPolicyStepIDs []string `json:"escalation_policy_step_ids"` + ServiceIDs []string `json:"service_ids"` + IntegrationKeyIDs []string `json:"integration_key_ids"` + RotationIDs []string `json:"rotation_ids"` + RotationParticipantIDs []string `json:"rotation_participant_ids"` + ScheduleIDs []string `json:"schedule_ids"` + ScheduleRuleIDs []string `json:"schedule_rule_ids"` + HeartbeatMonitorIDs []string `json:"heartbeat_monitor_ids"` + UserOverrideIDs []string `json:"user_override_ids"` +} + +func (c *Config) deleteAll(ctx context.Context, data *deleteAllData) error { + tx, err := c.DB.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + deleteIDs := func(fn func(context.Context, *sql.Tx, string) error, ids []string) { + if err != nil { + return + } + for _, id := range ids { + err = fn(ctx, tx, id) + if err != nil { + return + } + } + } + + deleteIDs(func(ctx context.Context, tx *sql.Tx, id string) error { + return c.OverrideStore.DeleteUserOverrideTx(ctx, tx, id) + }, data.UserOverrideIDs) + deleteIDs(c.HeartbeatStore.DeleteTx, 
data.HeartbeatMonitorIDs) + deleteIDs(c.ScheduleRuleStore.DeleteTx, data.ScheduleRuleIDs) + deleteIDs(c.IntegrationKeyStore.DeleteTx, data.IntegrationKeyIDs) + deleteIDs(func(ctx context.Context, tx *sql.Tx, id string) error { + _, err := c.EscalationStore.DeleteStepTx(ctx, tx, id) + return err + }, data.EscalationPolicyStepIDs) + deleteIDs(func(ctx context.Context, tx *sql.Tx, id string) error { + _, err := c.RotationStore.RemoveParticipantTx(ctx, tx, id) + return err + }, data.RotationParticipantIDs) + deleteIDs(c.ScheduleStore.DeleteTx, data.ScheduleIDs) + deleteIDs(c.RotationStore.DeleteRotationTx, data.RotationIDs) + deleteIDs(c.ServiceStore.DeleteTx, data.ServiceIDs) + deleteIDs(c.EscalationStore.DeletePolicyTx, data.EscalationPolicyIDs) + + if err != nil { + return err + } + + return tx.Commit() +} diff --git a/graphql/escalation.go b/graphql/escalation.go new file mode 100644 index 0000000000..b53d3e664c --- /dev/null +++ b/graphql/escalation.go @@ -0,0 +1,586 @@ +package graphql + +import ( + "fmt" + "github.com/target/goalert/assignment" + "github.com/target/goalert/escalation" + "github.com/target/goalert/schedule" + "github.com/target/goalert/user" + "github.com/target/goalert/validation" + + g "github.com/graphql-go/graphql" + "github.com/lib/pq" + "github.com/pkg/errors" +) + +func getPolicy(src interface{}) (*escalation.Policy, error) { + switch t := src.(type) { + case escalation.Policy: + return &t, nil + case *escalation.Policy: + return t, nil + default: + return nil, fmt.Errorf("invalid source type for escalation policy %T", t) + } +} + +func getStep(src interface{}) (*escalation.Step, error) { + switch t := src.(type) { + case escalation.Step: + return &t, nil + case *escalation.Step: + return t, nil + default: + return nil, fmt.Errorf("invalid source type for escalation step %T", t) + } +} + +func (h *Handler) escalationPolicyField() *g.Field { + return &g.Field{ + Type: h.escalationPolicy, + Args: 
g.FieldConfigArgument{
			"id": &g.ArgumentConfig{
				Type: g.NewNonNull(g.String),
			},
		},
		Resolve: func(p g.ResolveParams) (interface{}, error) {
			id, ok := p.Args["id"].(string)
			if !ok {
				return nil, validation.NewFieldError("id", "required")
			}

			// NOTE(review): scrub (newScrubber, defined elsewhere in this
			// package) appears to sanitize errors before returning them to
			// the client — confirm against scrub.go.
			return newScrubber(p.Context).scrub(h.c.EscalationStore.FindOnePolicy(p.Context, id))
		},
	}
}

// escalationPoliciesField returns the query field listing all escalation
// policies.
func (h *Handler) escalationPoliciesField() *g.Field {
	return &g.Field{
		Name: "EscalationPolicies",
		Type: g.NewList(h.escalationPolicy),
		Resolve: func(p g.ResolveParams) (interface{}, error) {
			return newScrubber(p.Context).scrub(h.c.EscalationStore.FindAllPolicies(p.Context))
		},
	}
}

// createOrUpdateEscalationPolicyField returns a mutation field that
// creates a policy when no id is supplied, and updates it otherwise.
func (h *Handler) createOrUpdateEscalationPolicyField() *g.Field {
	return &g.Field{
		Type: h.escalationPolicy,
		Args: g.FieldConfigArgument{
			"input": &g.ArgumentConfig{
				Type: g.NewInputObject(g.InputObjectConfig{
					Name: "CreateOrUpdateEscalationPolicyInput",
					Fields: g.InputObjectConfigFieldMap{
						"id":          &g.InputObjectFieldConfig{Type: g.String},
						"name":        &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)},
						"description": &g.InputObjectFieldConfig{Type: g.String},
						"repeat":      &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)},
					},
				}),
			},
		},
		Resolve: func(p g.ResolveParams) (interface{}, error) {
			m, ok := p.Args["input"].(map[string]interface{})
			if !ok {
				return nil, errors.New("invalid input")
			}

			// Missing/mistyped values fall back to zero values via the
			// ignored type-assertion results.
			ep := new(escalation.Policy)
			ep.ID, _ = m["id"].(string)
			ep.Name, _ = m["name"].(string)
			ep.Description, _ = m["description"].(string)
			ep.Repeat, _ = m["repeat"].(int)

			// Empty ID means create; otherwise update in place.
			if ep.ID == "" {
				return newScrubber(p.Context).scrub(h.c.EscalationStore.CreatePolicy(p.Context, ep))
			}

			return newScrubber(p.Context).scrub(ep, h.c.EscalationStore.UpdatePolicy(p.Context, ep))
		},
	}
}

// deleteEscalationPolicyField returns a mutation field that deletes a
// policy by ID, translating the Postgres FK violation for policies still
// assigned to services into a friendly error.
func (h *Handler) deleteEscalationPolicyField() *g.Field {
	return &g.Field{
		Type: g.NewObject(g.ObjectConfig{
			Name:   "DeleteEscalationPolicyOutput",
			Fields: g.Fields{"deleted_id": &g.Field{Type: g.String}},
		}),
		Args: g.FieldConfigArgument{
			"input": &g.ArgumentConfig{
				Type: g.NewInputObject(g.InputObjectConfig{
					Name: "DeleteEscalationPolicyInput",
					Fields: g.InputObjectConfigFieldMap{
						"id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)},
					},
				}),
			},
		},
		Resolve: func(p g.ResolveParams) (interface{}, error) {
			m, ok := p.Args["input"].(map[string]interface{})
			if !ok {
				return nil, errors.New("invalid input type")
			}

			var n struct {
				ID string `json:"deleted_id"`
			}

			n.ID, _ = m["id"].(string)

			err := h.c.EscalationStore.DeletePolicy(p.Context, n.ID)

			// Code 23503 corresponds to: "foreign_key_violation"
			// https://www.postgresql.org/docs/9.6/static/errcodes-appendix.html
			if e, ok := errors.Cause(err).(*pq.Error); ok && e.Code == "23503" && e.Constraint == "services_escalation_policy_id_fkey" {
				return nil, errors.New("policy is currently in use by one or more services")
			}
			return newScrubber(p.Context).scrub(&n, err)
		},
	}
}

// escalationPolicyFields returns the output fields of the EscalationPolicy
// object type, including resolvers for its assigned services and steps.
func (h *Handler) escalationPolicyFields() g.Fields {
	return g.Fields{
		"id":          &g.Field{Type: g.String},
		"name":        &g.Field{Type: g.String},
		"description": &g.Field{Type: g.String},
		"repeat":      &g.Field{Type: g.Int},
		"target_type": targetTypeField(assignment.TargetTypeEscalationPolicy),
		"services": &g.Field{
			Type:        g.NewList(h.service),
			Description: "List of services currently assigned to this escalation policy.",
			Resolve: func(p g.ResolveParams) (interface{}, error) {
				ep, err := getPolicy(p.Source)
				if err != nil {
					return nil, err
				}
				scrub := newScrubber(p.Context).scrub

				return scrub(h.c.ServiceStore.FindAllByEP(p.Context, ep.ID))
			},
		},

		"steps": &g.Field{
			Type: g.NewList(h.escalationPolicyStep),
			Resolve: func(p g.ResolveParams) (interface{}, error) {
				ep, err := getPolicy(p.Source)
				if err != nil {
					return nil, err
				}

				return newScrubber(p.Context).scrub(h.c.EscalationStore.FindAllSteps(p.Context, ep.ID))
			},
		},
	}
}

// escalationPolicyStepFields returns the output fields of the
// EscalationPolicyStep object type. The user_ids/schedule_ids/users/
// schedules fields are deprecated projections of the unified "targets"
// field, filtering the step's targets by type.
func (h *Handler) escalationPolicyStepFields() g.Fields {
	return g.Fields{
		"id":                   &g.Field{Type: g.String},
		"escalation_policy_id": &g.Field{Type: g.String},
		"delay_minutes":        &g.Field{Type: g.Int},
		"step_number":          &g.Field{Type: g.Int},
		"user_ids": &g.Field{
			Type:              g.NewList(g.String),
			DeprecationReason: "Use the 'targets' field instead.",
			Resolve: func(p g.ResolveParams) (interface{}, error) {
				s, err := getStep(p.Source)
				if err != nil {
					return nil, err
				}
				tgts, err := h.c.EscalationStore.FindAllStepTargets(p.Context, s.ID)
				if err != nil {
					return newScrubber(p.Context).scrub(nil, err)
				}
				// Keep only user-type targets.
				result := make([]string, 0, len(tgts))
				for _, t := range tgts {
					if t.TargetType() != assignment.TargetTypeUser {
						continue
					}
					result = append(result, t.TargetID())
				}
				// err is necessarily nil here (checked above).
				return result, err
			},
		},
		"schedule_ids": &g.Field{
			Type:              g.NewList(g.String),
			DeprecationReason: "Use the 'targets' field instead.",
			Resolve: func(p g.ResolveParams) (interface{}, error) {
				s, err := getStep(p.Source)
				if err != nil {
					return nil, err
				}
				tgts, err := h.c.EscalationStore.FindAllStepTargets(p.Context, s.ID)
				if err != nil {
					return newScrubber(p.Context).scrub(nil, err)
				}

				// Keep only schedule-type targets.
				result := make([]string, 0, len(tgts))
				for _, t := range tgts {
					if t.TargetType() != assignment.TargetTypeSchedule {
						continue
					}
					result = append(result, t.TargetID())
				}
				// err is necessarily nil here (checked above).
				return result, err
			},
		},
		"source_type": sourceTypeField(assignment.SrcTypeEscalationPolicyStep),
		"source":      h.sourceField(),

		"escalation_policy": &g.Field{
			Type: h.escalationPolicy,
			Resolve: func(p g.ResolveParams) (interface{}, error) {
				s, err := getStep(p.Source)
				if err != nil {
					return nil, err
				}

				return newScrubber(p.Context).scrub(h.c.EscalationStore.FindOnePolicy(p.Context, s.PolicyID))
			},
		},

		"users": &g.Field{
			Type:              g.NewList(h.user),
			DeprecationReason: "Use the 'targets' field instead.",
			Resolve: func(p g.ResolveParams) (interface{}, error) {
				s, err := getStep(p.Source)
				if err != nil {
					return nil, err
				}
				var result []user.User
				var u *user.User
				tgts, err := h.c.EscalationStore.FindAllStepTargets(p.Context, s.ID)
				if err != nil {
					return newScrubber(p.Context).scrub(nil, err)
				}

				// Fetch each user-type target individually.
				// NOTE(review): N+1 lookups — consider a batch fetch or the
				// dataloader package used elsewhere in this repo.
				for _, t := range tgts {
					if t.TargetType() != assignment.TargetTypeUser {
						continue
					}
					u, err = h.c.UserStore.FindOne(p.Context, t.TargetID())
					if err != nil {
						return newScrubber(p.Context).scrub(nil, err)
					}
					result = append(result, *u)
				}

				return result, nil
			},
		},

		"targets": &g.Field{
			Type: g.NewList(h.assignmentTarget),
			Resolve: func(p g.ResolveParams) (interface{}, error) {
				s, err := getStep(p.Source)
				if err != nil {
					return nil, err
				}
				tgts, err := h.c.EscalationStore.FindAllStepTargets(p.Context, s.ID)
				if err != nil {
					return newScrubber(p.Context).scrub(nil, err)
				}

				return tgts, nil
			},
		},

		"schedules": &g.Field{
			Type:              g.NewList(h.schedule),
			DeprecationReason: "Use the 'targets' field instead.",
			Resolve: func(p g.ResolveParams) (interface{}, error) {
				s, err := getStep(p.Source)
				if err != nil {
					return nil, err
				}
				tgts, err := h.c.EscalationStore.FindAllStepTargets(p.Context, s.ID)
				if err != nil {
					return newScrubber(p.Context).scrub(nil, err)
				}

				// Fetch each schedule-type target individually (same N+1
				// caveat as the "users" field above).
				var result []schedule.Schedule
				var u *schedule.Schedule
				for _, t := range tgts {
					if t.TargetType() != assignment.TargetTypeSchedule {
						continue
					}
					u, err = h.c.ScheduleStore.FindOne(p.Context, t.TargetID())
					if err != nil {
						return newScrubber(p.Context).scrub(nil, err)
					}
					result = append(result, *u)
				}

				return result, nil
			},
		},
	}
}

// epStepTarget enumerates the assignment target types a step may reference.
var epStepTarget = g.NewEnum(g.EnumConfig{
	Name: "EscalationPolicyStepTarget",
	Values: g.EnumValueConfigMap{
		"user":     &g.EnumValueConfig{Value: assignment.TargetTypeUser},
		"schedule": &g.EnumValueConfig{Value: assignment.TargetTypeSchedule},
		"rotation": &g.EnumValueConfig{Value: assignment.TargetTypeRotation},
	},
})

// addEscalationPolicyStepTargetField returns a mutation field that adds a
// single target (user, schedule, or rotation) to an escalation policy step.
func (h *Handler) addEscalationPolicyStepTargetField() *g.Field {
	return &g.Field{
		Type: h.assignmentTarget,
		Args: g.FieldConfigArgument{
			"input": &g.ArgumentConfig{
				Type: g.NewInputObject(g.InputObjectConfig{
					Name: "AddEscalationPolicyStepTargetInput",
					Fields: g.InputObjectConfigFieldMap{
						"step_id":     &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)},
						"target_id":   &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)},
						"target_type": &g.InputObjectFieldConfig{Type: g.NewNonNull(epStepTarget)},
					},
				}),
			},
		},
		Resolve: func(p g.ResolveParams) (interface{}, error) {
			m, ok := p.Args["input"].(map[string]interface{})
			if !ok {
				return nil, errors.New("invalid input type")
			}
			var tgt assignment.RawTarget
			tgt.ID, _ = m["target_id"].(string)
			tgt.Type, _ = m["target_type"].(assignment.TargetType)
			stepID, _ := m["step_id"].(string)

			err := h.c.EscalationStore.AddStepTarget(p.Context, stepID, tgt)
			if err != nil {
				return newScrubber(p.Context).scrub(nil, err)
			}

			// Echo the added target back to the caller.
			return tgt, nil
		},
	}
}

// deleteEscalationPolicyStepTargetField returns a mutation field that
// removes a single target from an escalation policy step.
func (h *Handler) deleteEscalationPolicyStepTargetField() *g.Field {
	return &g.Field{
		Type: g.NewObject(g.ObjectConfig{
			Name: "DeleteEscalationPolicyStepTargetOutput",
			Fields: g.Fields{
				"target_id":   &g.Field{Type: g.String, Description: "ID of the target."},
				"target_type": &g.Field{Type: epStepTarget, Description: "The type of the target."},
			},
		}),
		Args: g.FieldConfigArgument{
			"input": &g.ArgumentConfig{
				Type: g.NewInputObject(g.InputObjectConfig{
					Name: "DeleteEscalationPolicyStepTargetInput",
					Fields: g.InputObjectConfigFieldMap{
						"step_id":     &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)},
						"target_id":   &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)},
						"target_type": &g.InputObjectFieldConfig{Type: g.NewNonNull(epStepTarget)},
					},
				}),
			},
		},
		Resolve: func(p g.ResolveParams) (interface{}, error) {
			m, ok := p.Args["input"].(map[string]interface{})
			if !ok {
				return nil, errors.New("invalid input type")
			}
			var tgt assignment.RawTarget
			tgt.ID, _ = m["target_id"].(string)
			tgt.Type, _ = m["target_type"].(assignment.TargetType)
			stepID, _ := m["step_id"].(string)
			err := h.c.EscalationStore.DeleteStepTarget(p.Context, stepID, tgt)
			return newScrubber(p.Context).scrub(tgt, err)
		},
	}
}

// createOrUpdateEscalationPolicyStepField returns a mutation field that
// creates a step when no id is supplied, or updates an existing one —
// reconciling its user/schedule targets against the supplied lists.
func (h *Handler) createOrUpdateEscalationPolicyStepField() *g.Field {
	return &g.Field{
		Type: g.NewObject(g.ObjectConfig{
			Name: "CreateOrUpdateEscalationPolicyStepOutput",
			Fields: g.Fields{
				"created":                &g.Field{Type: g.Boolean, Description: "Signifies if a new record was created."},
				"escalation_policy_step": &g.Field{Type: h.escalationPolicyStep, Description: "The created or updated record."},
			},
		}),
		Args: g.FieldConfigArgument{
			"input": &g.ArgumentConfig{
				Type: g.NewInputObject(g.InputObjectConfig{
					Name: "CreateOrUpdateEscalationPolicyStepInput",
					Fields: g.InputObjectConfigFieldMap{
						"id":                   &g.InputObjectFieldConfig{Type: g.String},
						"escalation_policy_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)},
						"delay_minutes":        &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)},
						"user_ids":             &g.InputObjectFieldConfig{Type: g.NewNonNull(g.NewList(g.String))},
						"schedule_ids":         &g.InputObjectFieldConfig{Type: g.NewNonNull(g.NewList(g.String))},
					},
				}),
			},
		},
		Resolve: func(p g.ResolveParams) (interface{}, error) {
			m, ok := p.Args["input"].(map[string]interface{})
			if !ok {
				return nil, errors.New("invalid input type")
			}
			var r struct {
				Created bool             `json:"created"`
				S       *escalation.Step `json:"escalation_policy_step"`
			}
			r.S = new(escalation.Step)
			// hasID distinguishes create (no id) from update.
			var hasID bool
			r.S.ID, hasID = m["id"].(string)
			r.S.PolicyID, _ = m["escalation_policy_id"].(string)
			r.S.DelayMinutes, _ = m["delay_minutes"].(int)

			userIDs, _ := m["user_ids"].([]interface{})
			schedIDs, _ := m["schedule_ids"].([]interface{})

			// Build the desired target set from both ID lists.
			asn := make([]assignment.Target, 0, len(userIDs)+len(schedIDs))
			for _, _id := range userIDs {
				if id, ok := _id.(string); ok {
					asn = append(asn, assignment.UserTarget(id))
				}
			}

			for _, _id := range schedIDs {
				if id, ok := _id.(string); ok {
					asn = append(asn, assignment.ScheduleTarget(id))
				}
			}

			scrub := newScrubber(p.Context).scrub
			var err error
			if !hasID {
				// Create path: make the step, then attach all targets.
				r.S, err = h.c.EscalationStore.CreateStep(p.Context, r.S)
				r.Created = true
				if err != nil {
					return newScrubber(p.Context).scrub(nil, err)
				}
				for _, tgt := range asn {
					err = h.c.EscalationStore.AddStepTarget(p.Context, r.S.ID, tgt)
					if err != nil {
						return scrub(nil, err)
					}
				}
				return r, nil
			}

			// Update path: update the step, then reconcile its targets.
			err = h.c.EscalationStore.UpdateStep(p.Context, r.S)
			if err != nil {
				return scrub(nil, err)
			}
			old, err := h.c.EscalationStore.FindAllStepTargets(p.Context, r.S.ID)
			if err != nil {
				return scrub(nil, err)
			}

			// Add every requested target, remembering them so stale ones
			// can be removed below.
			added := make(map[assignment.RawTarget]bool, len(asn))
			for _, tgt := range asn {
				added[assignment.RawTarget{Type: tgt.TargetType(), ID: tgt.TargetID()}] = true
				err = h.c.EscalationStore.AddStepTarget(p.Context, r.S.ID, tgt)
				if err != nil {
					return scrub(nil, err)
				}
			}

			for _, tgt := range old {
				if added[assignment.RawTarget{Type: tgt.TargetType(), ID: tgt.TargetID()}] {
					continue
				}
				// Rotation targets are not managed through this legacy
				// mutation (only user/schedule IDs are accepted), so they
				// are deliberately preserved.
				if tgt.TargetType() == assignment.TargetTypeRotation {
					continue
				}

				err = h.c.EscalationStore.DeleteStepTarget(p.Context, r.S.ID, tgt)
				if err != nil {
					return scrub(nil, err)
				}
			}

			// Re-read the step so the response reflects persisted state.
			r.S, err = h.c.EscalationStore.FindOneStep(p.Context, r.S.ID)
			return scrub(r, err)
		},
	}
}

func (h *Handler) deleteEscalationPolicyStepField() *g.Field {
	return &g.Field{
		Description: "Remove a step from an escalation policy.",
		Type: g.NewObject(g.ObjectConfig{
			Name: "DeleteEscalationPolicyStepOutput",
			Fields: g.Fields{
				"deleted_id":           &g.Field{Type: g.String},
				"escalation_policy_id": &g.Field{Type: g.String},
			},
		}),
		Args: g.FieldConfigArgument{
			"input": &g.ArgumentConfig{
				Type: g.NewInputObject(g.InputObjectConfig{
					Name: "DeleteEscalationPolicyStepInput",
					Fields: g.InputObjectConfigFieldMap{
						"id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)},
					},
				}),
			},
		},
		Resolve: func(p g.ResolveParams) (interface{}, error) {
			m, ok := p.Args["input"].(map[string]interface{})
			if !ok {
				return nil, errors.New("invalid input type")
			}

			var r struct {
				ID   string `json:"deleted_id"`
				EPID string `json:"escalation_policy_id"`
			}
			r.ID, _ = m["id"].(string)
			var err error
			// DeleteStep returns the ID of the policy the step belonged to.
			r.EPID, err = h.c.EscalationStore.DeleteStep(p.Context, r.ID)
			return newScrubber(p.Context).scrub(r, err)
		},
	}
}

func (h *Handler) moveEscalationPolicyStepField() *g.Field {
	return &g.Field{
		Description: "Moves a step to new_position, automatically shifting other participants around.",
		Type:        g.NewList(h.escalationPolicyStep),
		Args: g.FieldConfigArgument{
			"input": &g.ArgumentConfig{
				Type: g.NewInputObject(g.InputObjectConfig{
					Name: "MoveEscalationPolicyStepInput",
					Fields: g.InputObjectConfigFieldMap{
						"id":           &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)},
						"new_position": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)},
					},
				}),
			},
		},
		Resolve: func(p g.ResolveParams) (interface{}, error) {
			m, ok := p.Args["input"].(map[string]interface{})
			if !ok {
				return nil, errors.New("invalid input type")
			}

			id, _ := m["id"].(string)
			newPos, _ := m["new_position"].(int)

			err := h.c.EscalationStore.MoveStep(p.Context, id, newPos)
			if err != nil {
				return newScrubber(p.Context).scrub(nil, err)
			}

			// Look the step up again to learn its policy, then return the
			// policy's full (re-ordered) step list.
			eps, err := h.c.EscalationStore.FindOneStep(p.Context, id)
			if err != nil {
				return newScrubber(p.Context).scrub(nil, err)
			}
			return newScrubber(p.Context).scrub(h.c.EscalationStore.FindAllSteps(p.Context, eps.PolicyID))
		},
	}
}
diff --git a/graphql/handler.go b/graphql/handler.go
new file mode 100644
index 0000000000..95f9751386
--- /dev/null
+++ b/graphql/handler.go
@@ -0,0 +1,317 @@
package graphql

import (
	"context"
"encoding/json"
	"github.com/target/goalert/assignment"
	"github.com/target/goalert/escalation"
	"github.com/target/goalert/permission"
	"github.com/target/goalert/schedule"
	"github.com/target/goalert/schedule/rotation"
	"github.com/target/goalert/schedule/shiftcalc"
	"github.com/target/goalert/service"
	"github.com/target/goalert/user"
	"github.com/target/goalert/util/errutil"
	"github.com/target/goalert/util/log"
	"io/ioutil"
	"net/http"
	"sort"

	g "github.com/graphql-go/graphql"
	"github.com/graphql-go/graphql/language/ast"
	"github.com/graphql-go/graphql/language/parser"
	"github.com/graphql-go/graphql/language/source"
	"github.com/pkg/errors"
)

// requestInfoKey is a private context-key type, preventing collisions with
// keys from other packages.
type requestInfoKey int

// RequestInfoContextKey is used to store RequestInfo in a context.Context.
const RequestInfoContextKey = requestInfoKey(0)

// RequestInfo carries useful information about the request.
type RequestInfo struct {
	// Names of the mutations and queries invoked by the request.
	Mutations []string
	Queries   []string
}

// Handler serves the (legacy) GraphQL API. It holds the object types that
// make up the schema and the configuration (stores) their resolvers use.
type Handler struct {
	// c provides database access and the per-resource stores.
	c Config

	// GraphQL object types, one per exposed resource.
	alert                *g.Object
	alertSummary         *g.Object
	alertLog             *g.Object
	alertLogSubject      *g.Object
	user                 *g.Object
	service              *g.Object
	contactMethod        *g.Object
	notificationRule     *g.Object
	schedule             *g.Object
	rotation             *g.Object
	rotationParticipant  *g.Object
	escalationPolicyStep *g.Object
	escalationPolicy     *g.Object
	integrationKey       *g.Object
	scheduleRule         *g.Object
	scheduleAssignment   *g.Object
	scheduleShift        *g.Object
	onCallAssignment     *g.Object
	createAll            *g.Object
	heartbeat            *g.Object
	userOverride         *g.Object
	deleteAll            *g.Object
	serviceOnCallUser    *g.Object

	rotationShift *g.Object

	// Union/helper types for polymorphic assignment sources and targets.
	sourceType       *g.Union
	targetType       *g.Union
	assignmentSource *g.Object
	assignmentTarget *g.Object
	label            *g.Object

	legacyDB *legacyDB

	// shiftCalc computes schedule/rotation shifts for resolvers.
	shiftCalc *shiftcalc.ShiftCalculator
	// resolver resolver.ResolveWalker

	// schema is the assembled GraphQL schema served by this handler.
	schema g.Schema
}

// fieldConfigAdder abstracts the AddFieldConfig method shared by the
// graphql-go object types, so addFields can work with any of them.
type fieldConfigAdder interface {
	AddFieldConfig(string, *g.Field)
}

func addFields(o
fieldConfigAdder, f g.Fields) { + for n, f := range f { + if f.Resolve != nil { + f.Resolve = wrapFieldResolver(f.Resolve) + } + o.AddFieldConfig(n, f) + } +} + +func NewHandler(ctx context.Context, c Config) (*Handler, error) { + + obj := func(name, desc string, ifaces ...*g.Interface) *g.Object { + return g.NewObject(g.ObjectConfig{ + Name: name, + Description: desc, + Fields: g.Fields{}, + Interfaces: ifaces, + }) + } + c = cachedConfig(c) + + db, err := newLegacyDB(ctx, c.DB) + if err != nil { + return nil, err + } + + h := &Handler{ + c: c, + shiftCalc: &shiftcalc.ShiftCalculator{ + RotStore: c.RotationStore, + RuleStore: c.ScheduleRuleStore, + SchedStore: c.ScheduleStore, + }, + + legacyDB: db, + + alert: obj("Alert", "An alert."), + alertLogSubject: obj("AlertLogSubject", "The entity associated with the log event (e.g. the user who closed the alert)."), + user: obj("User", "A user."), + service: obj("Service", "A registered service."), + alertLog: obj("AlertLog", "A log entry for Alert activity."), + contactMethod: obj("ContactMethod", "A method of notifying (contacting) a User."), + notificationRule: obj("NotificationRule", "A rule controlling how/when to use a ContactMethod to notifiy a User."), + schedule: obj("Schedule", "An on-call schedule."), + rotation: obj("Rotation", "An on-call rotation."), + rotationParticipant: obj("RotationParticipant", "A participant in an on-call rotation."), + escalationPolicyStep: obj("EscalationPolicyStep", "A single step of an escalation policy."), + escalationPolicy: obj("EscalationPolicy", "An escalation policy."), + integrationKey: obj("IntegrationKey", "An Integration."), + assignmentSource: obj("AssignmentSource", "The source of an assignment."), + assignmentTarget: obj("AssignmentTarget", "The target of an assignment."), + scheduleRule: obj("ScheduleRule", "A schedule rule."), + scheduleAssignment: obj("ScheduleAssignment", "A schedule assignment"), + scheduleShift: obj("ScheduleShift", "A single shift of a 
schedule."), + onCallAssignment: obj("OnCallAssignment", "Contains an assignment for a user oncall."), + createAll: obj("CreateAll", "Creates multiple resources at once."), + alertSummary: obj("AlertSummary", "Contains alert totals for a service."), + heartbeat: obj("Heartbeat", "A heartbeat check for a service."), + userOverride: obj("UserOverride", "An override event to add, remove, or swap a user."), + deleteAll: obj("DeleteAll", "Deletes multiple resources at once."), + label: obj("Label", "Labels are key/value pairs that are attached to objects, such as services."), + rotationShift: obj("RotationShift2", "A single shift of a rotation."), + serviceOnCallUser: obj("ServiceOnCallUser", "An on-call user assigned to a service."), + } + + h.sourceType = g.NewUnion(g.UnionConfig{ + Name: "SourceType", + Description: "Source object type of an assignment.", + Types: []*g.Object{h.alert, h.user, h.service, h.rotationParticipant, h.escalationPolicyStep}, + ResolveType: func(p g.ResolveTypeParams) *g.Object { + src, ok := p.Value.(assignment.Source) + if !ok { + return nil + } + switch src.SourceType() { + case assignment.SrcTypeAlert: + return h.alert + case assignment.SrcTypeEscalationPolicyStep: + return h.escalationPolicyStep + case assignment.SrcTypeRotationParticipant: + return h.rotationParticipant + case assignment.SrcTypeScheduleRule: + return h.scheduleRule + case assignment.SrcTypeService: + return h.service + case assignment.SrcTypeUser: + return h.user + } + return nil + }, + }) + + h.targetType = g.NewUnion(g.UnionConfig{ + Name: "TargetType", + Description: "Target object type of an assignment.", + Types: []*g.Object{h.user, h.service, h.schedule, h.rotation, h.escalationPolicy}, + ResolveType: func(p g.ResolveTypeParams) *g.Object { + switch p.Value.(type) { + case user.User, *user.User: + return h.user + case service.Service, *service.Service: + return h.service + case schedule.Schedule, *schedule.Schedule: + return h.schedule + case rotation.Rotation, 
*rotation.Rotation: + return h.rotation + case escalation.Policy, *escalation.Policy: + return h.escalationPolicy + } + + return nil + }, + }) + + addFields(h.user, h.userFields()) + addFields(h.alertLogSubject, h.alertLogSubjectFields()) + addFields(h.alert, h.alertFields()) + addFields(h.alertLog, h.alertLogFields()) + addFields(h.contactMethod, h.CMFields()) + addFields(h.notificationRule, h.NRFields()) + addFields(h.service, h.serviceFields()) + addFields(h.schedule, h.scheduleFields()) + addFields(h.rotation, h.rotationFields()) + addFields(h.rotationParticipant, h.rotationParticipantFields()) + addFields(h.escalationPolicyStep, h.escalationPolicyStepFields()) + addFields(h.escalationPolicy, h.escalationPolicyFields()) + addFields(h.integrationKey, h.integrationKeyFields()) + addFields(h.assignmentSource, h.assignmentSourceFields()) + addFields(h.assignmentTarget, h.assignmentTargetFields()) + addFields(h.scheduleRule, h.scheduleRuleFields()) + addFields(h.scheduleAssignment, h.scheduleAssignmentFields()) + addFields(h.scheduleShift, h.scheduleShiftFields()) + addFields(h.onCallAssignment, h.onCallAssignmentFields()) + addFields(h.createAll, h.createAllFields()) + addFields(h.alertSummary, h.alertSummaryFields()) + addFields(h.heartbeat, h.heartbeatMonitorFields()) + addFields(h.userOverride, h.userOverrideFields()) + addFields(h.deleteAll, h.deleteAllFields()) + addFields(h.label, h.labelFields()) + addFields(h.rotationShift, h.rotationShiftFields()) + addFields(h.serviceOnCallUser, h.serviceOnCallUserFields()) + + err = h.buildSchema() + if err != nil { + return nil, err + } + + return h, nil +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) + return + } + + data, err := ioutil.ReadAll(req.Body) + if err != nil { + log.Debug(ctx, 
errors.Wrap(err, "read GraphQL query")) + http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + return + } + + var b struct { + Query string + Variables map[string]interface{} + } + + err = json.Unmarshal(data, &b) + if err != nil { + log.Debug(ctx, errors.Wrap(err, "parse GraphQL query")) + http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + return + } + + var r *g.Result + params := g.Params{ + Context: ctx, + Schema: h.schema, + RequestString: b.Query, + VariableValues: b.Variables, + } + + if info, ok := ctx.Value(RequestInfoContextKey).(*RequestInfo); ok && info != nil { + // If we have access to the RequestInfo pointer, we parse the query and try to gleam some + // useful info to store. + // + // If the request info is missing (e.g. a future option to disable it) we just gracefully + // ignore it and move on as before. + source := source.NewSource(&source.Source{ + Body: []byte(b.Query), + Name: "GraphQL Request", + }) + a, err := parser.Parse(parser.ParseParams{Source: source}) + if err != nil { + log.Debug(ctx, errors.Wrap(err, "parse GraphQL query")) + http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + return + } + + // Safely walk the graphql AST by whatever means necessary... + for _, n := range a.Definitions { + switch def := n.(type) { + case *ast.OperationDefinition: + for _, sel := range def.GetSelectionSet().Selections { + f, ok := sel.(*ast.Field) + if !ok { + continue + } + + switch def.GetOperation() { + case "query": + info.Queries = append(info.Queries, f.Name.Value) + case "mutation": + info.Mutations = append(info.Mutations, f.Name.Value) + } + } + } + } + + // Sorted things are appreciated in logs. 
+ sort.Strings(info.Queries) + sort.Strings(info.Mutations) + } + + r = g.Do(params) + err = json.NewEncoder(w).Encode(r) + if errutil.HTTPError(ctx, w, errors.Wrap(err, "serialize GraphQL response")) { + return + } +} diff --git a/graphql/heartbeat.go b/graphql/heartbeat.go new file mode 100644 index 0000000000..45d91b3101 --- /dev/null +++ b/graphql/heartbeat.go @@ -0,0 +1,106 @@ +package graphql + +import ( + "github.com/target/goalert/heartbeat" + "net/url" + + g "github.com/graphql-go/graphql" + "github.com/pkg/errors" +) + +func getHeartbeatMonitor(src interface{}) (*heartbeat.Monitor, error) { + switch s := src.(type) { + case *heartbeat.Monitor: + return s, nil + case heartbeat.Monitor: + return &s, nil + default: + return nil, errors.Errorf("could not get heartbeat key (unknown source type %T)", s) + } +} + +var heartbeatMonitorState = g.NewEnum(g.EnumConfig{ + Name: "HeartbeatMonitorState", + Values: g.EnumValueConfigMap{ + "inactive": &g.EnumValueConfig{Value: heartbeat.StateInactive}, + "healthy": &g.EnumValueConfig{Value: heartbeat.StateHealthy}, + "unhealthy": &g.EnumValueConfig{Value: heartbeat.StateUnhealthy}, + }, +}) + +func (h *Handler) heartbeatMonitorFields() g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + "name": &g.Field{Type: g.String}, + "service_id": &g.Field{Type: g.String}, + "last_state": &g.Field{ + Type: heartbeatMonitorState, + Resolve: func(p g.ResolveParams) (interface{}, error) { + h, err := getHeartbeatMonitor(p.Source) + if err != nil { + return nil, err + } + return h.LastState(), nil + }, + }, + "last_heartbeat_minutes": &g.Field{ + Type: g.Int, + Description: "Number of full minutes elapsed since last heartbeat.", + Resolve: func(p g.ResolveParams) (interface{}, error) { + h, err := getHeartbeatMonitor(p.Source) + if err != nil { + return nil, err + } + sec, ok := h.LastHeartbeatMinutes() + if !ok { + return nil, nil + } + return sec, nil + }, + }, + "interval_minutes": &g.Field{Type: g.Int}, + + 
"href": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + h, err := getHeartbeatMonitor(p.Source) + if err != nil { + return nil, err + } + return "/v1/api/heartbeat/" + url.PathEscape(h.ID), nil + }, + }, + } +} + +func (h *Handler) deleteHeartbeatMonitorField() *g.Field { + return &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "DeleteHeartbeatMonitorOutput", + Fields: g.Fields{"deleted_id": &g.Field{Type: g.String}}, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteHeartbeatMonitorInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var r struct { + ID string `json:"deleted_id"` + } + + r.ID, _ = m["id"].(string) + return newScrubber(p.Context).scrub(r, h.c.HeartbeatStore.DeleteTx(p.Context, nil, r.ID)) + }, + } +} diff --git a/graphql/integrationkey.go b/graphql/integrationkey.go new file mode 100644 index 0000000000..e26b423a83 --- /dev/null +++ b/graphql/integrationkey.go @@ -0,0 +1,181 @@ +package graphql + +import ( + "errors" + "github.com/target/goalert/config" + "github.com/target/goalert/validation" + "net/url" + + "github.com/target/goalert/integrationkey" + + "fmt" + + g "github.com/graphql-go/graphql" +) + +func (h *Handler) integrationKeyFields() g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + "name": &g.Field{Type: g.String}, + "type": &g.Field{Type: integrationKeyType}, + "service_id": &g.Field{Type: g.String}, + "service": &g.Field{ + Type: h.service, + Resolve: func(p g.ResolveParams) (interface{}, error) { + var ID string + switch i := p.Source.(type) { + case integrationkey.IntegrationKey: + ID = i.ServiceID + case 
*integrationkey.IntegrationKey: + ID = i.ServiceID + default: + return nil, fmt.Errorf("could not resolve ServiceID of integration_key (unknown source type %T)", i) + } + + return newScrubber(p.Context).scrub(h.c.ServiceStore.FindOne(p.Context, ID)) + }, + }, + "href": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + cfg := config.FromContext(p.Context) + var key integrationkey.IntegrationKey + switch i := p.Source.(type) { + case integrationkey.IntegrationKey: + key = i + case *integrationkey.IntegrationKey: + key = *i + default: + return nil, fmt.Errorf("error resolving key ID (unknown source type %T)", i) + } + + switch key.Type { + case integrationkey.TypeGeneric: + return "/v1/api/alerts?integration_key=" + url.QueryEscape(key.ID), nil + case integrationkey.TypeGrafana: + return "/v1/webhooks/grafana?integration_key=" + url.QueryEscape(key.ID), nil + case integrationkey.TypeEmail: + if !cfg.Mailgun.Enable || cfg.Mailgun.EmailDomain == "" { + return "", nil + } + return "mailto:" + key.ID + "@" + cfg.Mailgun.EmailDomain, nil + } + + return "#" + url.QueryEscape(key.ID), nil + }, + }, + } +} + +func (h *Handler) integrationKeyField() *g.Field { + return &g.Field{ + Type: h.integrationKey, + Args: g.FieldConfigArgument{ + "id": &g.ArgumentConfig{ + Type: g.NewNonNull(g.String), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + id, ok := p.Args["id"].(string) + if !ok { + return nil, validation.NewFieldError("id", "required") + } + + return newScrubber(p.Context).scrub(h.c.IntegrationKeyStore.FindOne(p.Context, id)) + }, + } +} + +func (h *Handler) integrationKeysField() *g.Field { + return &g.Field{ + Name: "IntegrationKeys", + Type: g.NewList(h.integrationKey), + Args: g.FieldConfigArgument{ + "service_id": &g.ArgumentConfig{ + Type: g.NewNonNull(g.String), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + id, ok := p.Args["service_id"].(string) + if !ok { + return nil, 
validation.NewFieldError("service_id", "required") + } + return newScrubber(p.Context).scrub(h.c.IntegrationKeyStore.FindAllByService(p.Context, id)) + }, + } +} + +var integrationKeyType = g.NewEnum(g.EnumConfig{ + Name: "IntegrationKeyType", + Values: g.EnumValueConfigMap{ + "grafana": &g.EnumValueConfig{Value: integrationkey.TypeGrafana}, + "generic": &g.EnumValueConfig{Value: integrationkey.TypeGeneric}, + "email": &g.EnumValueConfig{Value: integrationkey.TypeEmail}, + }, +}) + +func (h *Handler) createIntegrationKeyField() *g.Field { + return &g.Field{ + Type: h.integrationKey, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "CreateIntegrationKeyInput", + Fields: g.InputObjectConfigFieldMap{ + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "type": &g.InputObjectFieldConfig{Type: g.NewNonNull(integrationKeyType)}, + "service_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var i integrationkey.IntegrationKey + i.Name, _ = m["name"].(string) + i.Type, _ = m["type"].(integrationkey.Type) + i.ServiceID, _ = m["service_id"].(string) + + return newScrubber(p.Context).scrub(h.c.IntegrationKeyStore.Create(p.Context, &i)) + }, + } +} + +func (h *Handler) deleteIntegrationKeyField() *g.Field { + return &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "DeleteIntegrationKeyOutput", + Fields: g.Fields{"deleted_id": &g.Field{Type: g.String}}, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteIntegrationKeyInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + 
Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var r struct { + ID string `json:"deleted_id"` + } + + r.ID, _ = m["id"].(string) + + return newScrubber(p.Context).scrub(r, h.c.IntegrationKeyStore.Delete(p.Context, r.ID)) + }, + } +} diff --git a/graphql/label.go b/graphql/label.go new file mode 100644 index 0000000000..98ffe4e3bb --- /dev/null +++ b/graphql/label.go @@ -0,0 +1,75 @@ +package graphql + +import ( + "github.com/target/goalert/assignment" + "github.com/target/goalert/label" + + g "github.com/graphql-go/graphql" + "github.com/pkg/errors" +) + +func (h *Handler) labelFields() g.Fields { + return g.Fields{ + "key": &g.Field{ + Type: g.String, + }, + "value": &g.Field{ + Type: g.String, + }, + } +} + +func (h *Handler) setLabelField() *g.Field { + return &g.Field{ + Type: g.Boolean, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "SetLabelInput", + Fields: g.InputObjectConfigFieldMap{ + "target_type": &g.InputObjectFieldConfig{Type: g.NewNonNull(assignmentTargetType)}, + "target_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "key": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "value": &g.InputObjectFieldConfig{Type: g.String}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var lbl label.Label + + var tgt assignment.RawTarget + tgt.ID, _ = m["target_id"].(string) + tgt.Type, _ = m["target_type"].(assignment.TargetType) + lbl.Target = tgt + lbl.Key, _ = m["key"].(string) + lbl.Value, _ = m["value"].(string) + + err := h.c.LabelStore.SetTx(p.Context, nil, &lbl) + if err != nil { + return newScrubber(p.Context).scrub(nil, errors.Wrap(err, "set label")) + } + + return true, nil + 
}, + } +} + +func (h *Handler) labelKeysField() *g.Field { + return &g.Field{ + Description: "All unique keys for labels.", + Type: g.NewList(g.String), + Resolve: func(p g.ResolveParams) (interface{}, error) { + keys, err := h.c.LabelStore.UniqueKeys(p.Context) + if err != nil { + return newScrubber(p.Context).scrub(nil, errors.Wrap(err, "get unique keys")) + } + return newScrubber(p.Context).scrub(keys, nil) + }, + } +} diff --git a/graphql/legacydb.go b/graphql/legacydb.go new file mode 100644 index 0000000000..b8f5e4c541 --- /dev/null +++ b/graphql/legacydb.go @@ -0,0 +1,105 @@ +package graphql + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" +) + +type legacyDB struct { + db *sql.DB + + schedFromRot *sql.Stmt + rotFromSched *sql.Stmt + allRotFromSched *sql.Stmt +} + +func newLegacyDB(ctx context.Context, db *sql.DB) (*legacyDB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + return &legacyDB{ + db: db, + + schedFromRot: p.P(` + SELECT schedule_id + FROM schedule_rules + WHERE tgt_rotation_id = $1 and tgt_rotation_id notnull + LIMIT 1 + `), + rotFromSched: p.P(` + SELECT tgt_rotation_id + FROM schedule_rules + WHERE schedule_id = $1 and tgt_rotation_id notnull + LIMIT 1 + `), + allRotFromSched: p.P(` + SELECT DISTINCT tgt_rotation_id + FROM schedule_rules + WHERE schedule_id = $1 and tgt_rotation_id notnull + `), + }, p.Err +} +func (l *legacyDB) ScheduleIDFromRotation(ctx context.Context, rotID string) (string, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return "", err + } + err = validate.UUID("RotationID", rotID) + if err != nil { + return "", err + } + + var schedID string + err = l.schedFromRot.QueryRowContext(ctx, rotID).Scan(&schedID) + if err != nil { + return "", err + } + return schedID, nil +} + +func (l *legacyDB) RotationIDFromScheduleID(ctx context.Context, schedID string) 
(string, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return "", err + } + err = validate.UUID("ScheduleID", schedID) + if err != nil { + return "", err + } + + var rotID string + err = l.rotFromSched.QueryRowContext(ctx, schedID).Scan(&rotID) + if err != nil { + return "", err + } + return rotID, nil +} +func (l *legacyDB) FindAllRotationIDsFromScheduleID(ctx context.Context, schedID string) ([]string, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + err = validate.UUID("ScheduleID", schedID) + if err != nil { + return nil, err + } + + rows, err := l.allRotFromSched.QueryContext(ctx, schedID) + if err != nil { + return nil, err + } + defer rows.Close() + + var rotationIDs []string + for rows.Next() { + var rotID string + err = rows.Scan(&rotID) + if err != nil { + return nil, err + } + rotationIDs = append(rotationIDs, rotID) + } + return rotationIDs, nil +} diff --git a/graphql/limit.go b/graphql/limit.go new file mode 100644 index 0000000000..14f0efb725 --- /dev/null +++ b/graphql/limit.go @@ -0,0 +1,65 @@ +package graphql + +import ( + "errors" + "github.com/target/goalert/limit" + + g "github.com/graphql-go/graphql" +) + +var limitID = g.NewEnum(g.EnumConfig{ + Name: "LimitID", + Values: g.EnumValueConfigMap{ + "notification_rules_per_user": &g.EnumValueConfig{Value: limit.NotificationRulesPerUser}, + "contact_methods_per_user": &g.EnumValueConfig{Value: limit.ContactMethodsPerUser}, + "ep_steps_per_policy": &g.EnumValueConfig{Value: limit.EPStepsPerPolicy}, + "ep_actions_per_step": &g.EnumValueConfig{Value: limit.EPActionsPerStep}, + "participants_per_rotation": &g.EnumValueConfig{Value: limit.ParticipantsPerRotation}, + "rules_per_schedule": &g.EnumValueConfig{Value: limit.RulesPerSchedule}, + "integration_keys_per_service": &g.EnumValueConfig{Value: limit.IntegrationKeysPerService}, + "unacked_alerts_per_service": &g.EnumValueConfig{Value: 
limit.UnackedAlertsPerService}, + "targets_per_schedule": &g.EnumValueConfig{Value: limit.TargetsPerSchedule}, + "heartbeat_monitors_per_service": &g.EnumValueConfig{Value: limit.HeartbeatMonitorsPerService}, + "user_overrides_per_schedule": &g.EnumValueConfig{Value: limit.UserOverridesPerSchedule}, + }, +}) + +func (h *Handler) updateConfigLimitField() *g.Field { + return &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "UpdateConfigLimitOutput", + Fields: g.Fields{ + "id": &g.Field{Type: limitID}, + "max": &g.Field{Type: g.Int}, + }, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewNonNull(g.NewInputObject(g.InputObjectConfig{ + Name: "UpdateConfigLimitInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(limitID)}, + "max": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + }, + })), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var res struct { + ID limit.ID `json:"id"` + Max int `json:"max"` + } + + res.ID, _ = m["id"].(limit.ID) + res.Max, _ = m["max"].(int) + + err := h.c.LimitStore.SetMax(p.Context, res.ID, res.Max) + return newScrubber(p.Context).scrub(res, err) + }, + } +} diff --git a/graphql/notificationrule.go b/graphql/notificationrule.go new file mode 100644 index 0000000000..f48a3c9fc2 --- /dev/null +++ b/graphql/notificationrule.go @@ -0,0 +1,155 @@ +package graphql + +import ( + "errors" + "github.com/target/goalert/user/notificationrule" + + "fmt" + + g "github.com/graphql-go/graphql" +) + +func (h *Handler) NRFields() g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + "delay_minutes": &g.Field{Type: g.Int, Description: "Delay in minutes."}, + "delay": &g.Field{ + Type: g.Int, + DeprecationReason: "use 'delay_minutes' instead", + Resolve: func(p g.ResolveParams) (interface{}, error) { + + var n 
notificationrule.NotificationRule + + switch t := p.Source.(type) { + case notificationrule.NotificationRule: + n = t + case *notificationrule.NotificationRule: + n = *t + default: + return nil, fmt.Errorf("invalid source type for notification rule %T", t) + } + + return n.DelayMinutes, nil + }, + }, + "contact_method_id": &g.Field{Type: g.String}, + "contact_method": &g.Field{ + Type: h.contactMethod, + Resolve: func(p g.ResolveParams) (interface{}, error) { + var n notificationrule.NotificationRule + + switch t := p.Source.(type) { + case notificationrule.NotificationRule: + n = t + case *notificationrule.NotificationRule: + n = *t + default: + return nil, fmt.Errorf("invalid source type for notification rule %T", t) + } + + return newScrubber(p.Context).scrub(h.c.CMStore.FindOne(p.Context, n.ContactMethodID)) + }, + }, + } +} + +func (h *Handler) updateNotificationRuleField() *g.Field { + return &g.Field{ + Type: h.notificationRule, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "UpdateNotificationRuleInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "delay_minutes": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + id, _ := m["id"].(string) + delay, _ := m["delay_minutes"].(int) + + err := h.c.NRStore.UpdateDelay(p.Context, id, delay) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + return newScrubber(p.Context).scrub(h.c.NRStore.FindOne(p.Context, id)) + }, + } +} + +func (h *Handler) createNotificationRuleField() *g.Field { + return &g.Field{ + Type: h.notificationRule, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: 
g.NewInputObject(g.InputObjectConfig{ + Name: "CreateNotificationRuleInput", + Fields: g.InputObjectConfigFieldMap{ + "user_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "delay_minutes": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + "contact_method_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var n notificationrule.NotificationRule + + n.UserID, _ = m["user_id"].(string) + n.DelayMinutes, _ = m["delay_minutes"].(int) + n.ContactMethodID, _ = m["contact_method_id"].(string) + + return newScrubber(p.Context).scrub(h.c.NRStore.Insert(p.Context, &n)) + }, + } +} + +func (h *Handler) deleteNotificationRuleField() *g.Field { + return &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "DeleteNotificationRuleOutput", + Fields: g.Fields{"deleted_id": &g.Field{Type: g.String}}, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteNotificationRuleInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var n struct { + ID string `json:"deleted_id"` + } + + n.ID, _ = m["id"].(string) + + err := h.c.NRStore.Delete(p.Context, n.ID) + return newScrubber(p.Context).scrub(&n, err) + }, + } +} diff --git a/graphql/oncallassignment.go b/graphql/oncallassignment.go new file mode 100644 index 0000000000..6104a8aca5 --- /dev/null +++ b/graphql/oncallassignment.go @@ -0,0 +1,21 @@ +package graphql + +import ( + g "github.com/graphql-go/graphql" +) + +func (h *Handler) onCallAssignmentFields() 
g.Fields { + return g.Fields{ + "is_active": &g.Field{Type: g.Boolean}, + "service_id": &g.Field{Type: g.String}, + "service_name": &g.Field{Type: g.String}, + "escalation_policy_id": &g.Field{Type: g.String}, + "escalation_policy_name": &g.Field{Type: g.String}, + "escalation_policy_step_number": &g.Field{Type: g.Int}, + "rotation_id": &g.Field{Type: g.String}, + "rotation_name": &g.Field{Type: g.String}, + "schedule_id": &g.Field{Type: g.String}, + "schedule_name": &g.Field{Type: g.String}, + "user_id": &g.Field{Type: g.String}, + } +} diff --git a/graphql/rotation.go b/graphql/rotation.go new file mode 100644 index 0000000000..d8317cf196 --- /dev/null +++ b/graphql/rotation.go @@ -0,0 +1,323 @@ +package graphql + +import ( + "fmt" + "github.com/target/goalert/assignment" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/util" + "github.com/target/goalert/validation" + "time" + + g "github.com/graphql-go/graphql" + "github.com/pkg/errors" +) + +var rotationTypeEnum = g.NewEnum(g.EnumConfig{ + Name: "RotationType", + Values: g.EnumValueConfigMap{ + "daily": &g.EnumValueConfig{Value: rotation.TypeDaily}, + "weekly": &g.EnumValueConfig{Value: rotation.TypeWeekly}, + "hourly": &g.EnumValueConfig{Value: rotation.TypeHourly}, + }, +}) + +func (h *Handler) rotationShiftFields() g.Fields { + return g.Fields{ + "start_time": &g.Field{Type: ISOTimestamp}, + "end_time": &g.Field{Type: ISOTimestamp}, + "participant_id": &g.Field{Type: g.String}, + } +} + +func getRot(src interface{}) (*rotation.Rotation, error) { + switch s := src.(type) { + case rotation.Rotation: + return &s, nil + case *rotation.Rotation: + return s, nil + default: + return nil, fmt.Errorf("invalid source type %T for rotation", s) + } +} + +func (h *Handler) rotationFields() g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + "name": &g.Field{Type: g.String}, + "description": &g.Field{Type: g.String}, + "type": &g.Field{Type: rotationTypeEnum}, + 
"start": &g.Field{Type: ISOTimestamp}, + "shift_length": &g.Field{Type: g.Int}, + "target_type": targetTypeField(assignment.TargetTypeRotation), + "schedule_id": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + r, err := getRot(p.Source) + if err != nil { + return nil, err + } + + scrub := newScrubber(p.Context).scrub + return scrub(h.legacyDB.ScheduleIDFromRotation(p.Context, r.ID)) + }, + }, + "schedule": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + r, err := getRot(p.Source) + if err != nil { + return nil, err + } + + scrub := newScrubber(p.Context).scrub + + schedID, err := h.legacyDB.ScheduleIDFromRotation(p.Context, r.ID) + if err != nil { + return scrub(nil, err) + } + + return scrub(h.c.ScheduleStore.FindOne(p.Context, schedID)) + }, + }, + + "shifts": &g.Field{ + Type: g.NewList(h.rotationShift), + Args: g.FieldConfigArgument{ + "start_time": &g.ArgumentConfig{Type: g.NewNonNull(g.String)}, + "end_time": &g.ArgumentConfig{Type: g.NewNonNull(g.String)}, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + return nil, nil + }, + }, + + "time_zone": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + r, err := getRot(p.Source) + if err != nil { + return nil, err + } + + return r.Start.Location().String(), nil + }, + }, + + "active_participant_id": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + r, err := getRot(p.Source) + if err != nil { + return nil, err + } + scrub := newScrubber(p.Context).scrub + + state, err := h.c.RotationStore.State(p.Context, r.ID) + if err == rotation.ErrNoState { + return -1, nil + } + if err != nil { + return scrub(nil, err) + } + + return state.ParticipantID, nil + }, + }, + + "next_handoff_times": &g.Field{ + Type: g.NewList(ISOTimestamp), + Resolve: func(p g.ResolveParams) (interface{}, error) { + r, err := getRot(p.Source) + if err != nil { + return nil, err + 
} + + scrub := newScrubber(p.Context).scrub + parts, err := h.c.RotationStore.FindAllParticipants(p.Context, r.ID) + if err != nil { + return scrub(nil, err) + } + + shiftState, err := h.c.RotationStore.State(p.Context, r.ID) + if err == rotation.ErrNoState { + // rotation hasn't been started/processed yet + return nil, nil + } + + if err != nil { + return scrub(nil, err) + } + + var shifts []time.Time + cEnd := r.EndTime(shiftState.ShiftStart) + for range parts { + shifts = append(shifts, cEnd) + cEnd = r.EndTime(cEnd) + } + + return shifts, nil + }, + }, + + "participants": &g.Field{ + Type: g.NewList(h.rotationParticipant), + Resolve: func(p g.ResolveParams) (interface{}, error) { + r, err := getRot(p.Source) + if err != nil { + return nil, err + } + + return newScrubber(p.Context).scrub(h.c.RotationStore.FindAllParticipants(p.Context, r.ID)) + }, + }, + } +} + +func (h *Handler) createOrUpdateRotationField() *g.Field { + return &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "CreateOrUpdateRotationOutput", + Fields: g.Fields{ + "created": &g.Field{Type: g.Boolean, Description: "Signifies if a new record was created."}, + "rotation": &g.Field{Type: h.rotation, Description: "The created or updated record."}, + }, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewNonNull(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateOrUpdateRotationInput", + Description: "Add rotation to a schedule", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.String, Description: "Specifies an existing rotation to update."}, + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "description": &g.InputObjectFieldConfig{Type: g.String}, + "type": &g.InputObjectFieldConfig{Type: g.NewNonNull(rotationTypeEnum)}, + "start": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "shift_length": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + "schedule_id": &g.InputObjectFieldConfig{Type: 
g.String}, + "time_zone": &g.InputObjectFieldConfig{Type: g.String}, + }, + })), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var r rotation.Rotation + + r.ID, _ = m["id"].(string) + r.Name, _ = m["name"].(string) + r.Description, _ = m["description"].(string) + r.Type, _ = m["type"].(rotation.Type) + + sTime, _ := m["start"].(string) + var err error + r.Start, err = time.Parse(time.RFC3339, sTime) + if err != nil { + return nil, validation.NewFieldError("start", "invalid format for time value: "+err.Error()) + } + + r.ShiftLength, _ = m["shift_length"].(int) + schedID, ok := m["schedule_id"].(string) + if ok { + sched, err := h.c.ScheduleStore.FindOne(p.Context, schedID) + if err != nil { + return newScrubber(p.Context).scrub(nil, errors.Wrap(err, "lookup schedule")) + } + r.Name = sched.Name + " Rotation" + r.Start = r.Start.In(sched.TimeZone) + } else { + tz, _ := m["time_zone"].(string) + if tz == "" { + return nil, validation.NewFieldError("time_zone", "must not be empty") + } + loc, err := util.LoadLocation(tz) + if err != nil { + return nil, validation.NewFieldError("time_zone", "invalid time_zone: "+err.Error()) + } + r.Start = r.Start.In(loc) + } + + var create bool + + if r.ID == "" { + create = true + rot, err := h.c.RotationStore.CreateRotation(p.Context, &r) + if err != nil { + return newScrubber(p.Context).scrub(nil, errors.Wrap(err, "create rotation")) + } + r = *rot + } else { + err = h.c.RotationStore.UpdateRotation(p.Context, &r) + if err != nil { + return newScrubber(p.Context).scrub(nil, errors.Wrap(err, "update rotation")) + } + } + + var resp struct { + Created bool `json:"created"` + Rotation rotation.Rotation `json:"rotation"` + } + resp.Created = create + resp.Rotation = r + return resp, nil + }, + } +} + +func (h *Handler) deleteRotationField() *g.Field { + return &g.Field{ + Type: 
g.NewObject(g.ObjectConfig{ + Name: "DeleteRotationOutput", + Fields: g.Fields{"deleted_id": &g.Field{Type: g.String}}, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewNonNull(g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteRotationInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + })), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var r struct { + ID string `json:"deleted_id"` + } + r.ID, _ = m["id"].(string) + return newScrubber(p.Context).scrub(r, h.c.RotationStore.DeleteRotation(p.Context, r.ID)) + }, + } +} + +func (h *Handler) rotationsField() *g.Field { + return &g.Field{ + Type: g.NewList(h.rotation), + Resolve: func(p g.ResolveParams) (interface{}, error) { + return newScrubber(p.Context).scrub(h.c.RotationStore.FindAllRotations(p.Context)) + }, + } +} +func (h *Handler) rotationField() *g.Field { + return &g.Field{ + Type: h.rotation, + Args: g.FieldConfigArgument{ + "id": &g.ArgumentConfig{ + Type: g.NewNonNull(g.String), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + id, _ := p.Args["id"].(string) + return newScrubber(p.Context).scrub(h.c.RotationStore.FindRotation(p.Context, id)) + }, + } +} diff --git a/graphql/rotationparticipant.go b/graphql/rotationparticipant.go new file mode 100644 index 0000000000..9555535987 --- /dev/null +++ b/graphql/rotationparticipant.go @@ -0,0 +1,322 @@ +package graphql + +import ( + "errors" + "fmt" + "github.com/target/goalert/assignment" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/validation/validate" + + g "github.com/graphql-go/graphql" +) + +func getRotationPart(s interface{}) (*rotation.Participant, error) { + switch p := s.(type) { + case rotation.Participant: + return &p, nil + case *rotation.Participant: 
+ return p, nil + default: + return nil, fmt.Errorf("invalid source type %T for rotation participant", p) + } +} + +func (h *Handler) rotationParticipantFields() g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + "position": &g.Field{Type: g.Int}, + "user_id": &g.Field{Type: g.String}, + "rotation_id": &g.Field{Type: g.String}, + + "rotation": &g.Field{ + Type: h.rotation, + Resolve: func(p g.ResolveParams) (interface{}, error) { + rp, err := getRotationPart(p.Source) + if err != nil { + return nil, err + } + + return newScrubber(p.Context).scrub(h.c.RotationStore.FindRotation(p.Context, rp.RotationID)) + }, + }, + + "user": &g.Field{ + Type: h.user, + Resolve: func(p g.ResolveParams) (interface{}, error) { + rp, err := getRotationPart(p.Source) + if err != nil { + return nil, err + } + + if rp.Target.TargetType() == assignment.TargetTypeUser { + return newScrubber(p.Context).scrub(h.c.UserStore.FindOne(p.Context, rp.Target.TargetID())) + } + return nil, errors.New("no user assigned to that rotation slot") + }, + }, + } +} + +func (h *Handler) addRotationParticipantField() *g.Field { + return &g.Field{ + Description: "Adds a new participant to the end of a rotation. 
The same user can be added multiple times.", + Type: h.rotationParticipant, + DeprecationReason: "use addRotationParticipant2 instead", + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "AddRotationParticipantInput", + Fields: g.InputObjectConfigFieldMap{ + "rotation_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "user_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + rp := &rotation.Participant{} + var err error + rp.RotationID, _ = m["rotation_id"].(string) + + if id, ok := m["user_id"].(string); ok { + err = validate.UUID("user_id", id) + if err != nil { + return nil, err + } + rp.Target = assignment.UserTarget(id) + } + + return newScrubber(p.Context).scrub(h.c.RotationStore.AddParticipant(p.Context, rp)) + }, + } +} + +func (h *Handler) deleteRotationParticipantField() *g.Field { + return &g.Field{ + Description: "Remove a participant from a rotation.", + DeprecationReason: "use deleteRotationParticipant2 instead", + Type: g.NewObject(g.ObjectConfig{ + Name: "DeleteRotationParticipantOutput", + Fields: g.Fields{ + "deleted_id": &g.Field{Type: g.String}, + "rotation_id": &g.Field{Type: g.String}, + }, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteRotationParticipantInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var r struct { + ID string `json:"deleted_id"` + RID string `json:"rotation_id"` + } + r.ID, _ = m["id"].(string) + var err error 
+ r.RID, err = h.c.RotationStore.RemoveParticipant(p.Context, r.ID) + return newScrubber(p.Context).scrub(r, err) + + }, + } +} + +func (h *Handler) moveRotationParticipantField() *g.Field { + return &g.Field{ + Description: "Moves a participant to new_position, automatically shifting other participants around.", + DeprecationReason: "use moveRotationParticipant2 instead", + Type: g.NewList(h.rotationParticipant), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "MoveRotationParticipantInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "new_position": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + id, _ := m["id"].(string) + newPos, _ := m["new_position"].(int) + + err := h.c.RotationStore.MoveParticipant(p.Context, id, newPos) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + + rp, err := h.c.RotationStore.FindParticipant(p.Context, id) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + return newScrubber(p.Context).scrub(h.c.RotationStore.FindAllParticipants(p.Context, rp.RotationID)) + }, + } +} + +func (h *Handler) addRotationParticipant2Field() *g.Field { + return &g.Field{ + Description: "Adds a new participant to the end of a rotation. 
The same user can be added multiple times.", + Type: h.rotation, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "AddRotationParticipant2Input", + Fields: g.InputObjectConfigFieldMap{ + "rotation_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "user_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + rp := &rotation.Participant{} + rp.RotationID, _ = m["rotation_id"].(string) + scrub := newScrubber(p.Context).scrub + if id, ok := m["user_id"].(string); ok { + err := validate.UUID("user_id", id) + if err != nil { + return nil, err + } + rp.Target = assignment.UserTarget(id) + } + + rp, err := h.c.RotationStore.AddParticipant(p.Context, rp) + if err != nil { + return scrub(nil, err) + } + + return scrub(h.c.RotationStore.FindRotation(p.Context, rp.RotationID)) + }, + } +} + +func (h *Handler) deleteRotationParticipant2Field() *g.Field { + return &g.Field{ + Description: "Remove a participant from a rotation.", + Type: h.rotation, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteRotationParticipant2Input", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + scrub := newScrubber(p.Context).scrub + partID, _ := m["id"].(string) + rotID, err := h.c.RotationStore.RemoveParticipant(p.Context, partID) + if err != nil { + return scrub(nil, err) + } + + return scrub(h.c.RotationStore.FindRotation(p.Context, rotID)) + }, + } +} + +func (h *Handler) 
moveRotationParticipant2Field() *g.Field { + return &g.Field{ + Description: "Moves a participant to new_position, automatically shifting other participants around.", + Type: h.rotation, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "MoveRotationParticipant2Input", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "new_position": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + id, _ := m["id"].(string) + newPos, _ := m["new_position"].(int) + scrub := newScrubber(p.Context).scrub + err := h.c.RotationStore.MoveParticipant(p.Context, id, newPos) + if err != nil { + return scrub(nil, err) + } + + rp, err := h.c.RotationStore.FindParticipant(p.Context, id) + if err != nil { + return scrub(nil, err) + } + + return scrub(h.c.RotationStore.FindRotation(p.Context, rp.RotationID)) + }, + } +} + +func (h *Handler) setActiveParticipantField() *g.Field { + return &g.Field{ + Description: "Sets a specified participant as active in the provided rotation.", + Type: h.rotation, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "SetActiveParticipantInput", + Fields: g.InputObjectConfigFieldMap{ + "rotation_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "participant_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + rotID, _ := m["rotation_id"].(string) + partID, _ := m["participant_id"].(string) + scrub := newScrubber(p.Context).scrub + err := 
h.c.RotationStore.SetActiveParticipant(p.Context, rotID, partID) + if err != nil { + return scrub(nil, err) + } + + return scrub(h.c.RotationStore.FindRotation(p.Context, rotID)) + }, + } +} diff --git a/graphql/schedule.go b/graphql/schedule.go new file mode 100644 index 0000000000..ecd04a7b6a --- /dev/null +++ b/graphql/schedule.go @@ -0,0 +1,449 @@ +package graphql + +import ( + "fmt" + "github.com/target/goalert/assignment" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/util" + "github.com/target/goalert/validation" + "time" + + g "github.com/graphql-go/graphql" + "github.com/pkg/errors" +) + +func getSchedule(src interface{}) (*schedule.Schedule, error) { + switch t := src.(type) { + case schedule.Schedule: + return &t, nil + case *schedule.Schedule: + return t, nil + default: + return nil, fmt.Errorf("invalid source type for schedule %T", t) + } +} + +func (h *Handler) getRotation(p g.ResolveParams) (r rotation.Rotation, err error) { + s, err := getSchedule(p.Source) + if err != nil { + return r, err + } + + rotID, err := h.legacyDB.RotationIDFromScheduleID(p.Context, s.ID) + if err != nil { + return r, errors.Wrap(err, "fetch rotation ID") + } + + rt, err := h.c.RotationStore.FindRotation(p.Context, rotID) + if err != nil { + return r, errors.Wrap(err, "fetch rotation") + } + + return *rt, nil +} + +func (h *Handler) getRotations(p g.ResolveParams) ([]rotation.Rotation, error) { + s, err := getSchedule(p.Source) + if err != nil { + return nil, err + } + ids, err := h.legacyDB.FindAllRotationIDsFromScheduleID(p.Context, s.ID) + if err != nil { + return nil, err + } + + var result []rotation.Rotation + for _, id := range ids { + + r, err := h.c.RotationStore.FindRotation(p.Context, id) + if err != nil { + return nil, err + } + result = append(result, *r) + } + + return result, nil +} + +func (h *Handler) scheduleFields() 
g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + "name": &g.Field{Type: g.String}, + "description": &g.Field{Type: g.String}, + "time_zone": &g.Field{Type: g.String}, + "target_type": targetTypeField(assignment.TargetTypeSchedule), + "escalation_policies": &g.Field{ + Type: g.NewList(h.escalationPolicy), + Description: "List of escalation policies currently using this schedule", + Resolve: func(p g.ResolveParams) (interface{}, error) { + s, err := getSchedule(p.Source) + if err != nil { + return nil, err + } + scrub := newScrubber(p.Context).scrub + + return scrub(h.c.EscalationStore.FindAllPoliciesBySchedule(p.Context, s.ID)) + }, + }, + + "user_overrides": &g.Field{ + Type: g.NewList(h.userOverride), + Args: g.FieldConfigArgument{ + "start_time": &g.ArgumentConfig{Type: g.NewNonNull(g.String)}, + "end_time": &g.ArgumentConfig{Type: g.NewNonNull(g.String)}, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + s, err := getSchedule(p.Source) + if err != nil { + return nil, err + } + scrub := newScrubber(p.Context).scrub + startStr, _ := p.Args["start_time"].(string) + endStr, _ := p.Args["end_time"].(string) + + start, err := time.Parse(time.RFC3339, startStr) + if err != nil { + return nil, validation.NewFieldError("start_time", err.Error()) + } + end, err := time.Parse(time.RFC3339, endStr) + if err != nil { + return nil, validation.NewFieldError("end_time", err.Error()) + } + + return scrub(h.c.OverrideStore.FindAllUserOverrides(p.Context, start, end, assignment.ScheduleTarget(s.ID))) + }, + }, + + "rotations": &g.Field{ + Type: g.NewList(h.rotation), + Resolve: func(p g.ResolveParams) (interface{}, error) { return newScrubber(p.Context).scrub(h.getRotations(p)) }, + }, + + // rotation stuff + "type": &g.Field{ + Type: rotationTypeEnum, + DeprecationReason: "Use the 'rotations' field.", + Resolve: func(p g.ResolveParams) (interface{}, error) { + r, err := h.getRotation(p) + return newScrubber(p.Context).scrub(r.Type, err) + }, + }, + 
+ "start_time": &g.Field{ + Type: ISOTimestamp, + DeprecationReason: "Use the 'rotations' field.", + Resolve: func(p g.ResolveParams) (interface{}, error) { + r, err := h.getRotation(p) + return newScrubber(p.Context).scrub(r.Start, err) + }, + }, + + "shift_length": &g.Field{ + Type: g.Int, + DeprecationReason: "Use the 'rotations' field.", + Resolve: func(p g.ResolveParams) (interface{}, error) { + r, err := h.getRotation(p) + return newScrubber(p.Context).scrub(r.ShiftLength, err) + }, + }, + + "assignments": &g.Field{ + Type: g.NewList(h.scheduleAssignment), + Args: g.FieldConfigArgument{ + "start_time": &g.ArgumentConfig{Type: g.NewNonNull(g.String)}, + "end_time": &g.ArgumentConfig{Type: g.NewNonNull(g.String)}, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + sched, err := getSchedule(p.Source) + if err != nil { + return nil, err + } + + scrub := newScrubber(p.Context).scrub + + startStr, _ := p.Args["start_time"].(string) + endStr, _ := p.Args["end_time"].(string) + + start, err := time.Parse(time.RFC3339, startStr) + if err != nil { + return nil, validation.NewFieldError("start_time", err.Error()) + } + end, err := time.Parse(time.RFC3339, endStr) + if err != nil { + return nil, validation.NewFieldError("end_time", err.Error()) + } + return scrub(h.c.ShiftCalc.ScheduleAssignments(p.Context, start, end, sched.ID)) + }, + }, + + "final_shifts": &g.Field{ + Type: g.NewList(h.scheduleShift), + Args: g.FieldConfigArgument{ + "start_time": &g.ArgumentConfig{Type: g.NewNonNull(g.String)}, + "end_time": &g.ArgumentConfig{Type: g.NewNonNull(g.String)}, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + sched, err := getSchedule(p.Source) + if err != nil { + return nil, err + } + + scrub := newScrubber(p.Context).scrub + + startStr, _ := p.Args["start_time"].(string) + endStr, _ := p.Args["end_time"].(string) + + start, err := time.Parse(time.RFC3339, startStr) + if err != nil { + return nil, validation.NewFieldError("start_time", 
err.Error()) + } + end, err := time.Parse(time.RFC3339, endStr) + if err != nil { + return nil, validation.NewFieldError("end_time", err.Error()) + } + return scrub(h.c.ShiftCalc.ScheduleFinalShifts(p.Context, start, end, sched.ID)) + }, + }, + + "final_shifts_with_overrides": &g.Field{ + Type: g.NewList(h.scheduleShift), + Args: g.FieldConfigArgument{ + "start_time": &g.ArgumentConfig{Type: g.NewNonNull(g.String)}, + "end_time": &g.ArgumentConfig{Type: g.NewNonNull(g.String)}, + "v2": &g.ArgumentConfig{Type: g.Boolean}, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + sched, err := getSchedule(p.Source) + if err != nil { + return nil, err + } + + scrub := newScrubber(p.Context).scrub + + startStr, _ := p.Args["start_time"].(string) + endStr, _ := p.Args["end_time"].(string) + + start, err := time.Parse(time.RFC3339, startStr) + if err != nil { + return nil, validation.NewFieldError("start_time", err.Error()) + } + end, err := time.Parse(time.RFC3339, endStr) + if err != nil { + return nil, validation.NewFieldError("end_time", err.Error()) + } + + if end.After(start.In(time.UTC).AddDate(0, 1, 5)) { + return nil, validation.NewFieldError("end_time", "must not be more than 1 month beyond start_time") + } + if !end.After(start) { + return nil, validation.NewFieldError("end_time", "must be after start_time") + } + + if v2, _ := p.Args["v2"].(bool); v2 { + return h.c.OnCallStore.HistoryBySchedule(p.Context, sched.ID, start, end) + } + return scrub(h.c.ShiftCalc.ScheduleFinalShiftsWithOverrides(p.Context, start, end, sched.ID)) + }, + }, + } +} + +func (h *Handler) scheduleField() *g.Field { + return &g.Field{ + Type: h.schedule, + Args: g.FieldConfigArgument{ + "id": &g.ArgumentConfig{ + Type: g.NewNonNull(g.String), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + id, _ := p.Args["id"].(string) + return newScrubber(p.Context).scrub(h.c.ScheduleStore.FindOne(p.Context, id)) + }, + } +} +func (h *Handler) schedulesField() *g.Field { + 
return &g.Field{ + Type: g.NewList(h.schedule), + Resolve: func(p g.ResolveParams) (interface{}, error) { + return newScrubber(p.Context).scrub(h.c.ScheduleStore.FindAll(p.Context)) + }, + } +} + +func (h *Handler) createScheduleField() *g.Field { + return &g.Field{ + Type: h.schedule, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewNonNull(g.NewInputObject(g.InputObjectConfig{ + Name: "CreateScheduleInput", + Description: "Create a schedule.", + Fields: g.InputObjectConfigFieldMap{ + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "description": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "time_zone": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "default_rotation": &g.InputObjectFieldConfig{Type: g.NewInputObject(g.InputObjectConfig{ + Name: "DefaultRotationFields", + Fields: g.InputObjectConfigFieldMap{ + "type": &g.InputObjectFieldConfig{Type: g.NewNonNull(rotationTypeEnum)}, + "start_time": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "shift_length": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Int)}, + }, + })}, + }, + })), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input") + } + scrub := newScrubber(p.Context).scrub + + var s schedule.Schedule + var r rotation.Rotation + s.Name, _ = m["name"].(string) + s.Description, _ = m["description"].(string) + z, _ := m["time_zone"].(string) + + var err error + s.TimeZone, err = util.LoadLocation(z) + if err != nil { + return scrub(nil, validation.NewFieldError("Timezone", err.Error())) + } + + rot, ok := m["default_rotation"].(map[string]interface{}) + if !ok { + // no default, just create it + return scrub(h.c.ScheduleStore.Create(p.Context, &s)) + } + + // Creating default rotation + r.Name = s.Name + " Rotation" + r.ShiftLength, _ = rot["shift_length"].(int) + r.Type, _ = rot["type"].(rotation.Type) + + tz, 
_ := rot["time_zone"].(string) + if tz == "" { + tz = s.TimeZone.String() + } + loc, err := util.LoadLocation(tz) + if err != nil { + return nil, validation.NewFieldError("time_zone", err.Error()) + } + + sTime, _ := rot["start_time"].(string) + r.Start, err = time.Parse(time.RFC3339, sTime) + if err != nil { + return nil, validation.NewFieldError("start_time", err.Error()) + } + r.Start = r.Start.In(loc) + tx, err := h.c.DB.BeginTx(p.Context, nil) + if err != nil { + return scrub(nil, err) + } + defer tx.Rollback() + + // need to create a rotation, a schedule, a rule, and then point the rule to the rotation + newSched, err := h.c.ScheduleStore.CreateScheduleTx(p.Context, tx, &s) + if err != nil { + return scrub(nil, errors.Wrap(err, "create schedule")) + } + + newRot, err := h.c.RotationStore.CreateRotationTx(p.Context, tx, &r) + if err != nil { + return scrub(nil, errors.Wrap(err, "create rotation for new schedule")) + } + + _, err = h.c.ScheduleRuleStore.CreateRuleTx(p.Context, tx, rule.NewAlwaysActive(newSched.ID, assignment.RotationTarget(newRot.ID))) + if err != nil { + return scrub(nil, errors.Wrap(err, "create rule for new schedule and rotation")) + } + + err = tx.Commit() + if err != nil { + return scrub(nil, err) + } + + return newSched, nil + }, + } +} + +func (h *Handler) updateSchedule() *g.Field { + return &g.Field{ + Type: h.schedule, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewNonNull(g.NewInputObject(g.InputObjectConfig{ + Name: "UpdateScheduleInput", + Description: "Update a schedule.", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.String, Description: "Specifies an existing schedule to update."}, + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "description": &g.InputObjectFieldConfig{Type: g.String}, + "time_zone": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + })), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := 
p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input") + } + var s schedule.Schedule + s.ID, _ = m["id"].(string) + s.Name, _ = m["name"].(string) + s.Description, _ = m["description"].(string) + z, _ := m["time_zone"].(string) + + var err error + s.TimeZone, err = util.LoadLocation(z) + if err != nil { + return newScrubber(p.Context).scrub(nil, errors.Wrap(err, "parse time_zone")) + } + + err = h.c.ScheduleStore.Update(p.Context, &s) + if err != nil { + return newScrubber(p.Context).scrub(nil, errors.Wrap(err, "update schedule")) + } + return s, nil + }, + } +} + +func (h *Handler) deleteScheduleField() *g.Field { + return &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "DeleteScheduleOutput", + Fields: g.Fields{"deleted_id": &g.Field{Type: g.String}}, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewNonNull(g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteScheduleInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + })), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var r struct { + ID string `json:"deleted_id"` + } + r.ID, _ = m["id"].(string) + return newScrubber(p.Context).scrub(r, h.c.ScheduleStore.Delete(p.Context, r.ID)) + }, + } +} diff --git a/graphql/scheduleassignment.go b/graphql/scheduleassignment.go new file mode 100644 index 0000000000..de424211bb --- /dev/null +++ b/graphql/scheduleassignment.go @@ -0,0 +1,139 @@ +package graphql + +import ( + "fmt" + "github.com/target/goalert/assignment" + "github.com/target/goalert/schedule/shiftcalc" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation/validate" + + g "github.com/graphql-go/graphql" + "github.com/pkg/errors" +) + +func getScheduleAssignment(src interface{}) 
(*shiftcalc.ScheduleAssignment, error) { + switch s := src.(type) { + case *shiftcalc.ScheduleAssignment: + return s, nil + case shiftcalc.ScheduleAssignment: + return &s, nil + default: + return nil, fmt.Errorf("could not id of user (unknown source type %T)", s) + } +} + +func (h *Handler) scheduleAssignmentFields() g.Fields { + return g.Fields{ + "id": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + asn, err := getScheduleAssignment(p.Source) + if err != nil { + return nil, err + } + return fmt.Sprintf("Schedule(%s)/%s(%s)", asn.ScheduleID, asn.Target.TargetType(), asn.Target.TargetID()), nil + }, + }, + "schedule_id": &g.Field{Type: g.String}, + "target_id": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + asn, err := getScheduleAssignment(p.Source) + if err != nil { + return nil, err + } + return asn.Target.TargetID(), nil + }, + }, + "target_type": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + asn, err := getScheduleAssignment(p.Source) + if err != nil { + return nil, err + } + return asn.Target.TargetType().String(), nil + }, + }, + "rotation": &g.Field{ + Type: h.rotation, + Resolve: func(p g.ResolveParams) (interface{}, error) { + asn, err := getScheduleAssignment(p.Source) + if err != nil { + return nil, err + } + + if asn.Target.TargetType() != assignment.TargetTypeRotation { + return nil, nil + } + + return newScrubber(p.Context).scrub(h.c.RotationStore.FindRotation(p.Context, asn.Target.TargetID())) + }, + }, + "user": &g.Field{ + Type: h.user, + Resolve: func(p g.ResolveParams) (interface{}, error) { + asn, err := getScheduleAssignment(p.Source) + if err != nil { + return nil, err + } + + if asn.Target.TargetType() != assignment.TargetTypeUser { + return nil, nil + } + + return newScrubber(p.Context).scrub(h.c.UserStore.FindOne(p.Context, asn.Target.TargetID())) + }, + }, + "rules": &g.Field{Type: g.NewList(h.scheduleRule)}, + 
"shifts": &g.Field{Type: g.NewList(h.scheduleShift)}, + } +} + +func (h *Handler) deleteScheduleAssignmentField() *g.Field { + return &g.Field{ + Type: h.schedule, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteScheduleAssignmentInput", + Fields: g.InputObjectConfigFieldMap{ + "schedule_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "target_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "target_type": &g.InputObjectFieldConfig{Type: g.NewNonNull(assignmentTargetType)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + schedID, _ := m["schedule_id"].(string) + err := validate.UUID("ScheduleID", schedID) + if err != nil { + return nil, err + } + + var ctx = log.WithField(p.Context, "ScheduleID", schedID) + + var asnTarget assignment.RawTarget + asnTarget.ID, _ = m["target_id"].(string) + asnTarget.Type, _ = m["target_type"].(assignment.TargetType) + + err = validate.UUID("TargetID", asnTarget.ID) + if err != nil { + return nil, err + } + + err = h.c.ScheduleRuleStore.DeleteByTarget(ctx, schedID, asnTarget) + if err != nil { + return newScrubber(ctx).scrub(nil, err) + } + + return newScrubber(ctx).scrub(h.c.ScheduleStore.FindOne(ctx, schedID)) + }, + } +} diff --git a/graphql/schedulerule.go b/graphql/schedulerule.go new file mode 100644 index 0000000000..13e07874a5 --- /dev/null +++ b/graphql/schedulerule.go @@ -0,0 +1,288 @@ +package graphql + +import ( + "fmt" + "github.com/target/goalert/assignment" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "time" + + g "github.com/graphql-go/graphql" + "github.com/pkg/errors" +) + +// type Rule struct { +// ID string +// 
ScheduleID string +// Days [7]bool +// Start time.Time +// End time.Time +// } +func getScheduleRule(src interface{}) (*rule.Rule, error) { + switch r := src.(type) { + case *rule.Rule: + return r, nil + case rule.Rule: + return &r, nil + default: + return nil, fmt.Errorf("could not id of user (unknown source type %T)", r) + } +} +func resolveDay(n time.Weekday) g.FieldResolveFn { + return func(p g.ResolveParams) (interface{}, error) { + rule, err := getScheduleRule(p.Source) + if err != nil { + return nil, err + } + return rule.Day(n), nil + } +} +func (h *Handler) scheduleRuleFields() g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + + "sunday": &g.Field{Type: g.Boolean, Resolve: resolveDay(time.Sunday)}, + "monday": &g.Field{Type: g.Boolean, Resolve: resolveDay(time.Monday)}, + "tuesday": &g.Field{Type: g.Boolean, Resolve: resolveDay(time.Tuesday)}, + "wednesday": &g.Field{Type: g.Boolean, Resolve: resolveDay(time.Wednesday)}, + "thursday": &g.Field{Type: g.Boolean, Resolve: resolveDay(time.Thursday)}, + "friday": &g.Field{Type: g.Boolean, Resolve: resolveDay(time.Friday)}, + "saturday": &g.Field{Type: g.Boolean, Resolve: resolveDay(time.Saturday)}, + + "start": &g.Field{Type: HourTime}, + "end": &g.Field{Type: HourTime}, + + "summary": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + r, err := getScheduleRule(p.Source) + if err != nil { + return nil, err + } + + return r.String(), nil + }, + }, + } +} + +var schedRuleTarget = g.NewEnum(g.EnumConfig{ + Name: "ScheduleRuleTarget", + Values: g.EnumValueConfigMap{ + "user": &g.EnumValueConfig{Value: assignment.TargetTypeUser}, + "rotation": &g.EnumValueConfig{Value: assignment.TargetTypeRotation}, + }, +}) + +func (h *Handler) createScheduleRuleField() *g.Field { + return &g.Field{ + Type: h.schedule, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "CreateScheduleRuleInput", + Fields: 
g.InputObjectConfigFieldMap{ + "schedule_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + + "sunday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "monday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "tuesday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "wednesday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "thursday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "friday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "saturday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + + "start": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "end": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + + "target_type": &g.InputObjectFieldConfig{Type: g.NewNonNull(schedRuleTarget)}, + "target_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var r rule.Rule + r.ScheduleID, _ = m["schedule_id"].(string) + err := validate.UUID("schedule_id", r.ScheduleID) + if err != nil { + return nil, err + } + + var asnTarget assignment.RawTarget + asnTarget.Type, _ = m["target_type"].(assignment.TargetType) + asnTarget.ID, _ = m["target_id"].(string) + err = validate.UUID("target_id", asnTarget.ID) + if err != nil { + return nil, err + } + + var e bool + e, _ = m["sunday"].(bool) + r.SetDay(time.Sunday, e) + e, _ = m["monday"].(bool) + r.SetDay(time.Monday, e) + e, _ = m["tuesday"].(bool) + r.SetDay(time.Tuesday, e) + e, _ = m["wednesday"].(bool) + r.SetDay(time.Wednesday, e) + e, _ = m["thursday"].(bool) + r.SetDay(time.Thursday, e) + e, _ = m["friday"].(bool) + r.SetDay(time.Friday, e) + e, _ = m["saturday"].(bool) + r.SetDay(time.Saturday, e) + + startStr, _ := m["start"].(string) + endStr, _ := m["end"].(string) + r.Start, err = 
rule.ParseClock(startStr) + if err != nil { + return nil, validation.NewFieldError("start", err.Error()) + } + r.End, err = rule.ParseClock(endStr) + if err != nil { + return nil, validation.NewFieldError("end", err.Error()) + } + r.Target = asnTarget + + scrub := newScrubber(p.Context).scrub + + _, err = h.c.ScheduleRuleStore.Add(p.Context, &r) + if err != nil { + return scrub(nil, err) + } + + return scrub(h.c.ScheduleStore.FindOne(p.Context, r.ScheduleID)) + }, + } +} + +func (h *Handler) updateScheduleRuleField() *g.Field { + return &g.Field{ + Type: h.schedule, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "UpdateScheduleRuleInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + + "sunday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "monday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "tuesday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "wednesday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "thursday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "friday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + "saturday": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.Boolean)}, + + "start": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "end": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var r rule.Rule + r.ID, _ = m["id"].(string) + err := validate.UUID("id", r.ID) + if err != nil { + return nil, err + } + p.Context = log.WithField(p.Context, "ScheduleRuleID", r.ID) + scrub := newScrubber(p.Context).scrub + oldRule, err := h.c.ScheduleRuleStore.FindOne(p.Context, r.ID) + if err != nil { + return scrub(nil, err) + } + 
r.ScheduleID = oldRule.ScheduleID + r.Target = oldRule.Target + + var e bool + e, _ = m["sunday"].(bool) + r.SetDay(time.Sunday, e) + e, _ = m["monday"].(bool) + r.SetDay(time.Monday, e) + e, _ = m["tuesday"].(bool) + r.SetDay(time.Tuesday, e) + e, _ = m["wednesday"].(bool) + r.SetDay(time.Wednesday, e) + e, _ = m["thursday"].(bool) + r.SetDay(time.Thursday, e) + e, _ = m["friday"].(bool) + r.SetDay(time.Friday, e) + e, _ = m["saturday"].(bool) + r.SetDay(time.Saturday, e) + + startStr, _ := m["start"].(string) + endStr, _ := m["end"].(string) + r.Start, err = rule.ParseClock(startStr) + if err != nil { + return nil, validation.NewFieldError("start", err.Error()) + } + r.End, err = rule.ParseClock(endStr) + if err != nil { + return nil, validation.NewFieldError("end", err.Error()) + } + + err = h.c.ScheduleRuleStore.Update(p.Context, &r) + if err != nil { + return scrub(nil, err) + } + + return scrub(h.c.ScheduleStore.FindOne(p.Context, r.ScheduleID)) + }, + } +} + +func (h *Handler) deleteScheduleRuleField() *g.Field { + return &g.Field{ + Type: h.schedule, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteScheduleRuleInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + id, _ := m["id"].(string) + err := validate.UUID("id", id) + if err != nil { + return nil, err + } + p.Context = log.WithField(p.Context, "ScheduleRuleID", id) + + schedID, err := h.c.ScheduleRuleStore.FindScheduleID(p.Context, id) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + + err = h.c.ScheduleRuleStore.Delete(p.Context, id) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + + return 
newScrubber(p.Context).scrub(h.c.ScheduleStore.FindOne(p.Context, schedID)) + }, + } +} diff --git a/graphql/scheduleshift.go b/graphql/scheduleshift.go new file mode 100644 index 0000000000..a8e2b49baf --- /dev/null +++ b/graphql/scheduleshift.go @@ -0,0 +1,38 @@ +package graphql + +import ( + "fmt" + "github.com/target/goalert/oncall" + "github.com/target/goalert/schedule/shiftcalc" + + g "github.com/graphql-go/graphql" +) + +func (h *Handler) scheduleShiftFields() g.Fields { + return g.Fields{ + "start_time": &g.Field{Type: ISOTimestamp}, + "end_time": &g.Field{Type: ISOTimestamp}, + "truncated": &g.Field{Type: g.Boolean}, + "user_id": &g.Field{Type: g.String}, + "user": &g.Field{ + Type: h.user, + Resolve: func(p g.ResolveParams) (interface{}, error) { + var userID string + switch s := p.Source.(type) { + case *shiftcalc.Shift: + userID = s.UserID + case shiftcalc.Shift: + userID = s.UserID + case oncall.Shift: + userID = s.UserID + case *oncall.Shift: + userID = s.UserID + default: + return nil, fmt.Errorf("could not id of user (unknown source type %T)", s) + } + + return newScrubber(p.Context).scrub(h.c.UserStore.FindOne(p.Context, userID)) + }, + }, + } +} diff --git a/graphql/schema.go b/graphql/schema.go new file mode 100644 index 0000000000..132487b716 --- /dev/null +++ b/graphql/schema.go @@ -0,0 +1,119 @@ +package graphql + +import ( + g "github.com/graphql-go/graphql" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +func wrapFieldResolver(fn g.FieldResolveFn) g.FieldResolveFn { + return func(p g.ResolveParams) (interface{}, error) { + ctx, span := trace.StartSpan(p.Context, "GraphQL."+p.Info.ParentType.Name()+"."+p.Info.FieldName) + defer span.End() + p.Context = ctx + val, err := fn(p) + if err != nil { + span.Annotate([]trace.Attribute{trace.BoolAttribute("error", true)}, err.Error()) + } + return val, err + } +} + +func (h *Handler) buildSchema() error { + queryFields := g.Fields{ + "currentUser": h.currentUserField(), + 
"user": h.userField(), + "users": h.usersField(), + "alert": h.alertField(), + "alerts": h.alertsField(), + "alerts2": h.searchAlertsField(), + "alertSummaries": h.alertSummariesField(), + "service": h.serviceField(), + "services": h.servicesField(), + "services2": h.searchServicesField(), + "schedules": h.schedulesField(), + "rotations": h.rotationsField(), + "schedule": h.scheduleField(), + "rotation": h.rotationField(), + "escalationPolicy": h.escalationPolicyField(), + "escalationPolicies": h.escalationPoliciesField(), + "integrationKey": h.integrationKeyField(), + "integrationKeys": h.integrationKeysField(), + "alertLogs": h.searchAlertLogsField(), + "labelKeys": h.labelKeysField(), + } + + for _, f := range queryFields { + f.Resolve = wrapFieldResolver(f.Resolve) + } + + rootQuery := g.ObjectConfig{Name: "RootQuery", Fields: queryFields} + mutFields := g.Fields{ + "updateUser": h.updateUserField(), + "deleteSchedule": h.deleteScheduleField(), + "createSchedule": h.createScheduleField(), + "updateSchedule": h.updateSchedule(), + "createAlert": h.createAlertField(), + "updateAlertStatus": h.updateStatusAlertField(), + "updateAlertStatusByService": h.updateAlertStatusByServiceField(), + "escalateAlert": h.escalateAlertField(), + "updateNotificationRule": h.updateNotificationRuleField(), + "createNotificationRule": h.createNotificationRuleField(), + "deleteNotificationRule": h.deleteNotificationRuleField(), + "updateContactMethod": h.updateContactMethodField(), + "createContactMethod": h.createContactMethodField(), + "deleteContactMethod": h.deleteContactMethodField(), + "addRotationParticipant": h.addRotationParticipantField(), + "deleteRotationParticipant": h.deleteRotationParticipantField(), + "moveRotationParticipant": h.moveRotationParticipantField(), + "setActiveParticipant": h.setActiveParticipantField(), + "createOrUpdateEscalationPolicyStep": h.createOrUpdateEscalationPolicyStepField(), + "addEscalationPolicyStepTarget": 
h.addEscalationPolicyStepTargetField(), + "deleteEscalationPolicyStepTarget": h.deleteEscalationPolicyStepTargetField(), + "deleteEscalationPolicy": h.deleteEscalationPolicyField(), + "deleteEscalationPolicyStep": h.deleteEscalationPolicyStepField(), + "moveEscalationPolicyStep": h.moveEscalationPolicyStepField(), + "createOrUpdateEscalationPolicy": h.createOrUpdateEscalationPolicyField(), + "createService": h.createServiceField(), + "updateService": h.updateServiceField(), + "deleteService": h.deleteServiceField(), + "createIntegrationKey": h.createIntegrationKeyField(), + "deleteIntegrationKey": h.deleteIntegrationKeyField(), + "createOrUpdateRotation": h.createOrUpdateRotationField(), + "deleteScheduleRule": h.deleteScheduleRuleField(), + "deleteScheduleAssignment": h.deleteScheduleAssignmentField(), + "updateScheduleRule": h.updateScheduleRuleField(), + "createScheduleRule": h.createScheduleRuleField(), + "addRotationParticipant2": h.addRotationParticipant2Field(), + "deleteRotationParticipant2": h.deleteRotationParticipant2Field(), + "moveRotationParticipant2": h.moveRotationParticipant2Field(), + "deleteRotation": h.deleteRotationField(), + "createAll": h.createAllField(), + "updateConfigLimit": h.updateConfigLimitField(), + "sendContactMethodTest": h.sendContactMethodTest(), + "sendContactMethodVerification": h.sendContactMethodVerification(), + "verifyContactMethod": h.verifyContactMethod(), + "deleteHeartbeatMonitor": h.deleteHeartbeatMonitorField(), + "updateUserOverride": h.updateUserOverrideField(), + "deleteAll": h.deleteAllField(), + "setUserFavorite": h.setUserFavoriteField(), + "unsetUserFavorite": h.unsetUserFavoriteField(), + "setLabel": h.setLabelField(), + } + for _, f := range mutFields { + f.Resolve = wrapFieldResolver(f.Resolve) + } + rootMutation := g.ObjectConfig{Name: "RootMutation", Fields: mutFields} + + schemaConfig := g.SchemaConfig{ + Query: g.NewObject(rootQuery), + Mutation: g.NewObject(rootMutation), + } + schema, err := 
g.NewSchema(schemaConfig) + if err != nil { + return errors.Wrap(err, "generate GraphQL schema") + } + + h.schema = schema + return nil +} diff --git a/graphql/service.go b/graphql/service.go new file mode 100644 index 0000000000..f7e8206f6a --- /dev/null +++ b/graphql/service.go @@ -0,0 +1,311 @@ +package graphql + +import ( + "errors" + "fmt" + "github.com/target/goalert/alert" + "github.com/target/goalert/permission" + "github.com/target/goalert/service" + "github.com/target/goalert/validation" + + g "github.com/graphql-go/graphql" +) + +func getService(src interface{}) (*service.Service, error) { + switch s := src.(type) { + case service.Service: + return &s, nil + case *service.Service: + return s, nil + default: + return nil, fmt.Errorf("unknown source type %T", s) + } +} +func (h *Handler) serviceOnCallUserFields() g.Fields { + return g.Fields{ + "user_id": &g.Field{Type: g.String}, + "user_name": &g.Field{Type: g.String}, + "step_number": &g.Field{Type: g.Int}, + } +} +func (h *Handler) serviceFields() g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + "name": &g.Field{Type: g.String}, + "description": &g.Field{Type: g.String}, + "is_user_favorite": &g.Field{ + Type: g.Boolean, + Description: "Indicates this service has been marked as a favorite by the user.", + Resolve: func(p g.ResolveParams) (interface{}, error) { + svc, err := getService(p.Source) + if err != nil { + return nil, err + } + return svc.IsUserFavorite(), nil + }, + }, + "escalation_policy_id": &g.Field{Type: g.String}, + "escalation_policy_name": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + svc, err := getService(p.Source) + if err != nil { + return nil, err + } + return svc.EscalationPolicyName(), nil + }, + }, + "labels": &g.Field{ + Type: g.NewList(h.label), + Resolve: func(p g.ResolveParams) (interface{}, error) { + svc, err := getService(p.Source) + if err != nil { + return nil, err + } + scrub := 
newScrubber(p.Context).scrub + return scrub(h.c.LabelStore.FindAllByService(p.Context, svc.ID)) + }, + }, + "escalation_policy": &g.Field{ + Type: h.escalationPolicy, + Resolve: func(p g.ResolveParams) (interface{}, error) { + svc, err := getService(p.Source) + if err != nil { + return nil, err + } + scrub := newScrubber(p.Context).scrub + return scrub(h.c.EscalationStore.FindOnePolicy(p.Context, svc.EscalationPolicyID)) + }, + }, + "integration_keys": &g.Field{ + Type: g.NewList(h.integrationKey), + Resolve: func(p g.ResolveParams) (interface{}, error) { + s, err := getService(p.Source) + if err != nil { + return nil, err + } + return newScrubber(p.Context).scrub(h.c.IntegrationKeyStore.FindAllByService(p.Context, s.ID)) + }, + }, + "heartbeat_monitors": &g.Field{ + Type: g.NewList(h.heartbeat), + Resolve: func(p g.ResolveParams) (interface{}, error) { + s, err := getService(p.Source) + if err != nil { + return nil, err + } + return newScrubber(p.Context).scrub(h.c.HeartbeatStore.FindAllByService(p.Context, s.ID)) + }, + }, + "on_call_users": &g.Field{ + Type: g.NewList(h.serviceOnCallUser), + Resolve: func(p g.ResolveParams) (interface{}, error) { + s, err := getService(p.Source) + if err != nil { + return nil, err + } + return newScrubber(p.Context).scrub(h.c.OnCallStore.OnCallUsersByService(p.Context, s.ID)) + }, + }, + "alerts": &g.Field{ + Type: g.NewList(h.alert), + Resolve: func(p g.ResolveParams) (interface{}, error) { + s, err := getService(p.Source) + if err != nil { + return nil, err + } + a, _, err := h.c.AlertStore.LegacySearch(p.Context, &alert.LegacySearchOptions{ServiceID: s.ID}) + return newScrubber(p.Context).scrub(a, err) + }, + }, + } +} + +func (h *Handler) serviceField() *g.Field { + return &g.Field{ + Type: h.service, + Args: g.FieldConfigArgument{ + "id": &g.ArgumentConfig{ + Type: g.NewNonNull(g.String), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + id, ok := p.Args["id"].(string) + if !ok { + return nil, 
validation.NewFieldError("id", "required") + } + userID := permission.UserID(p.Context) + + return newScrubber(p.Context).scrub(h.c.ServiceStore.FindOneForUser(p.Context, userID, id)) + }, + } +} + +func (h *Handler) servicesField() *g.Field { + return &g.Field{ + Type: g.NewList(h.service), + DeprecationReason: "Use services2 instead.", + Resolve: func(p g.ResolveParams) (interface{}, error) { + return newScrubber(p.Context).scrub(h.c.ServiceStore.FindAll(p.Context)) + }, + } +} + +func (h *Handler) searchServicesField() *g.Field { + return &g.Field{ + Args: g.FieldConfigArgument{ + "options": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "ServiceSearchOptions", + Fields: g.InputObjectConfigFieldMap{ + "search": &g.InputObjectFieldConfig{ + Type: g.String, + Description: "Searches for case-insensitive service name or description substring match.", + }, + "favorites_only": &g.InputObjectFieldConfig{Description: "Only include services marked as favorites by the current user.", Type: g.Boolean}, + "favorites_first": &g.InputObjectFieldConfig{Description: "Raise favorite services to the top of results.", Type: g.Boolean}, + "limit": &g.InputObjectFieldConfig{Description: "Limit the number of results.", Type: g.Int}, + }, + }), + }, + }, + Type: g.NewObject(g.ObjectConfig{ + Name: "ServiceSearchResult", + Fields: g.Fields{ + "items": &g.Field{Type: g.NewList(h.service)}, + "total_count": &g.Field{DeprecationReason: "Preserved for compatibility, represents the length of `items`. 
Will never be greater than `limit`.", Type: g.Int}, + }, + }), + Resolve: func(p g.ResolveParams) (interface{}, error) { + var result struct { + Items []service.Service `json:"items"` + Total int `json:"total_count"` + } + var opts service.LegacySearchOptions + if m, ok := p.Args["options"].(map[string]interface{}); ok { + opts.Search, _ = m["search"].(string) + opts.FavoritesOnly, _ = m["favorites_only"].(bool) + opts.FavoritesFirst, _ = m["favorites_first"].(bool) + opts.Limit, _ = m["limit"].(int) + } + opts.FavoritesUserID = permission.UserID(p.Context) + + s, err := h.c.ServiceStore.LegacySearch(p.Context, &opts) + if err != nil { + return newScrubber(p.Context).scrub(nil, err) + } + + result.Items = s + result.Total = len(s) + return newScrubber(p.Context).scrub(result, err) + }, + } +} + +func (h *Handler) createServiceField() *g.Field { + return &g.Field{ + Type: h.service, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "CreateServiceInput", + Fields: g.InputObjectConfigFieldMap{ + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "description": &g.InputObjectFieldConfig{Type: g.String}, + "escalation_policy_id": &g.InputObjectFieldConfig{Type: g.String}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + scrub := newScrubber(p.Context).scrub + var s service.Service + s.Name, _ = m["name"].(string) + s.Description, _ = m["description"].(string) + s.EscalationPolicyID, _ = m["escalation_policy_id"].(string) + + return scrub(h.c.ServiceStore.Insert(p.Context, &s)) + }, + } +} + +func (h *Handler) updateServiceField() *g.Field { + return &g.Field{ + Type: h.service, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ 
+ Name: "UpdateServiceInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "name": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "description": &g.InputObjectFieldConfig{Type: g.String}, + "escalation_policy_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + scrub := newScrubber(p.Context).scrub + + var s service.Service + s.ID, _ = m["id"].(string) + s.Name, _ = m["name"].(string) + s.Description, _ = m["description"].(string) + s.EscalationPolicyID, _ = m["escalation_policy_id"].(string) + err := h.c.ServiceStore.Update(p.Context, &s) + if err != nil { + return scrub(nil, err) + } + userID := permission.UserID(p.Context) + return scrub(h.c.ServiceStore.FindOneForUser(p.Context, userID, s.ID)) + }, + } +} + +func (h *Handler) deleteServiceField() *g.Field { + return &g.Field{ + Type: g.NewObject(g.ObjectConfig{ + Name: "DeleteServiceOutput", + Fields: g.Fields{"deleted_service_id": &g.Field{Type: g.String}}, + }), + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "because bugs.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "DeleteServiceInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var s struct { + ID string `json:"deleted_service_id"` + } + + s.ID, _ = m["id"].(string) + + err := h.c.ServiceStore.Delete(p.Context, s.ID) + return newScrubber(p.Context).scrub(&s, err) + }, + } +} diff --git a/graphql/user.go b/graphql/user.go new file mode 100644 index 0000000000..68a4309d4c --- /dev/null +++ 
b/graphql/user.go @@ -0,0 +1,215 @@ +package graphql + +import ( + "errors" + "fmt" + "github.com/target/goalert/permission" + "github.com/target/goalert/user" + "github.com/target/goalert/validation" + + g "github.com/graphql-go/graphql" +) + +var userRoleEnum = g.NewEnum(g.EnumConfig{ + Name: "UserRole", + Values: g.EnumValueConfigMap{ + "admin": &g.EnumValueConfig{Value: permission.RoleAdmin}, + "user": &g.EnumValueConfig{Value: permission.RoleUser}, + }, +}) + +func getUser(src interface{}) (*user.User, error) { + switch u := src.(type) { + case *user.User: + return u, nil + case user.User: + return &u, nil + default: + return nil, fmt.Errorf("could not id of user (unknown source type %T)", u) + } +} + +func (h *Handler) userFields() g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + "name": &g.Field{Type: g.String}, + "bio": &g.Field{Type: g.String, DeprecationReason: "Bio is no longer used or listed on the user details page"}, + "email": &g.Field{Type: g.String}, + "avatar_url": &g.Field{ + Type: g.String, + DeprecationReason: "Use /v1/api/users/{userID}/avatar instead.", + Resolve: func(p g.ResolveParams) (interface{}, error) { + u, err := getUser(p.Source) + if err != nil { + return nil, err + } + + // Return the same URL used previously (large). We can't return the + // new redirect-API URL here because it requires auth, and old UI code + // using it won't provide the token, which would result in broken images. + // + // So, for now, this field will use the new method and effectively return + // the same URL. Once the UI is updated, we can either remove this field + // or point it to the redirect URL. 
+ return u.ResolveAvatarURL(true), nil + }, + }, + "role": &g.Field{Type: userRoleEnum}, + "first_name": &g.Field{ + Type: g.String, + DeprecationReason: "use 'name' instead", + Resolve: func(p g.ResolveParams) (interface{}, error) { return p.Source.(*user.User).Name, nil }, + }, + "last_name": &g.Field{ + Type: g.String, + DeprecationReason: "use 'name' instead", + Resolve: func(g.ResolveParams) (interface{}, error) { return "", nil }, + }, + "alert_status_log_contact_method_id": &g.Field{Type: g.String, Description: "Configures a contact method ID to be used for automatic status updates of alerts."}, + + "contact_methods": &g.Field{ + Type: g.NewList(h.contactMethod), + Resolve: func(p g.ResolveParams) (interface{}, error) { + u, err := getUser(p.Source) + if err != nil { + return nil, err + } + + return newScrubber(p.Context).scrub(h.c.CMStore.FindAll(p.Context, u.ID)) + }, + }, + + "notification_rules": &g.Field{ + Type: g.NewList(h.notificationRule), + Resolve: func(p g.ResolveParams) (interface{}, error) { + u, err := getUser(p.Source) + if err != nil { + return nil, err + } + + return newScrubber(p.Context).scrub(h.c.NRStore.FindAll(p.Context, u.ID)) + }, + }, + + "on_call": &g.Field{ + Type: g.Boolean, + Resolve: func(p g.ResolveParams) (interface{}, error) { + u, err := getUser(p.Source) + if err != nil { + return nil, err + } + scrub := newScrubber(p.Context).scrub + return scrub(h.c.Resolver.IsUserOnCall(p.Context, u.ID)) + }, + }, + + "on_call_assignments": &g.Field{ + Type: g.NewList(h.onCallAssignment), + Resolve: func(p g.ResolveParams) (interface{}, error) { + u, err := getUser(p.Source) + if err != nil { + return nil, err + } + scrub := newScrubber(p.Context).scrub + + return scrub(h.c.Resolver.OnCallByUser(p.Context, u.ID)) + }, + }, + } +} + +func (h *Handler) updateUserField() *g.Field { + return &g.Field{ + Type: h.user, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "Update a user with provided fields.", + Type: 
g.NewInputObject(g.InputObjectConfig{ + Name: "UpdateUserInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "name": &g.InputObjectFieldConfig{Type: g.String}, + "email": &g.InputObjectFieldConfig{Type: g.String}, + "avatar_url": &g.InputObjectFieldConfig{Type: g.String}, + "role": &g.InputObjectFieldConfig{Type: userRoleEnum}, + "alert_status_log_contact_method_id": &g.InputObjectFieldConfig{Type: g.String}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + scrub := newScrubber(p.Context).scrub + tx, err := h.legacyDB.db.BeginTx(p.Context, nil) + if err != nil { + return scrub(nil, err) + } + defer tx.Rollback() + + id, _ := m["id"].(string) + usr, err := h.c.UserStore.FindOneTx(p.Context, tx, id, true) + if err != nil { + return scrub(nil, err) + } + + usr.AlertStatusCMID, _ = m["alert_status_log_contact_method_id"].(string) + err = h.c.UserStore.UpdateTx(p.Context, tx, usr) + if err != nil { + return scrub(nil, err) + } + + err = tx.Commit() + if err != nil { + return scrub(nil, err) + } + return scrub(usr, nil) + }, + } +} + +func (h *Handler) currentUserField() *g.Field { + return &g.Field{ + Type: h.user, + Resolve: func(p g.ResolveParams) (interface{}, error) { + id := permission.UserID(p.Context) + if id == "" { + return nil, nil + } + + return newScrubber(p.Context).scrub(h.c.UserStore.FindOne(p.Context, id)) + }, + } +} + +func (h *Handler) userField() *g.Field { + return &g.Field{ + Type: h.user, + Args: g.FieldConfigArgument{ + "id": &g.ArgumentConfig{ + Type: g.NewNonNull(g.String), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + id, ok := p.Args["id"].(string) + if !ok { + return nil, validation.NewFieldError("id", "required") + } + + return newScrubber(p.Context).scrub(h.c.UserStore.FindOne(p.Context, id)) + }, + } +} +func 
(h *Handler) usersField() *g.Field { + return &g.Field{ + Name: "Users", + Type: g.NewList(h.user), + Resolve: func(p g.ResolveParams) (interface{}, error) { + + return newScrubber(p.Context).scrub(h.c.UserStore.FindAll(p.Context)) + }, + } +} diff --git a/graphql/userfavorite.go b/graphql/userfavorite.go new file mode 100644 index 0000000000..35f00ec955 --- /dev/null +++ b/graphql/userfavorite.go @@ -0,0 +1,74 @@ +package graphql + +import ( + "errors" + "github.com/target/goalert/assignment" + "github.com/target/goalert/permission" + + g "github.com/graphql-go/graphql" +) + +func (h *Handler) setUserFavoriteField() *g.Field { + return &g.Field{ + Type: h.assignmentTarget, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "The target to set as a favorite.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "SetFavoriteInput", + Fields: g.InputObjectConfigFieldMap{ + "target_type": &g.InputObjectFieldConfig{Type: g.NewNonNull(assignmentTargetType)}, + "target_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var tgt assignment.RawTarget + tgt.Type, _ = m["target_type"].(assignment.TargetType) + tgt.ID, _ = m["target_id"].(string) + + userID := permission.UserID(p.Context) + + err := h.c.UserFavoriteStore.Set(p.Context, userID, tgt) + return newScrubber(p.Context).scrub(tgt, err) + }, + } +} + +func (h *Handler) unsetUserFavoriteField() *g.Field { + return &g.Field{ + Type: h.assignmentTarget, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Description: "The target to unset as a favorite.", + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "UnsetFavoriteInput", + Fields: g.InputObjectConfigFieldMap{ + "target_type": &g.InputObjectFieldConfig{Type: g.NewNonNull(assignmentTargetType)}, + "target_id": 
&g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, errors.New("invalid input type") + } + + var tgt assignment.RawTarget + tgt.Type, _ = m["target_type"].(assignment.TargetType) + tgt.ID, _ = m["target_id"].(string) + + userID := permission.UserID(p.Context) + err := h.c.UserFavoriteStore.Unset(p.Context, userID, &tgt) + return newScrubber(p.Context).scrub(tgt, err) + }, + } +} diff --git a/graphql/useroverride.go b/graphql/useroverride.go new file mode 100644 index 0000000000..9ac9a63904 --- /dev/null +++ b/graphql/useroverride.go @@ -0,0 +1,157 @@ +package graphql + +import ( + "fmt" + "github.com/target/goalert/assignment" + "github.com/target/goalert/override" + "github.com/target/goalert/validation" + "time" + + g "github.com/graphql-go/graphql" + "github.com/pkg/errors" +) + +var userOverrideTargetType = g.NewEnum(g.EnumConfig{ + Name: "UserOverrideTargetType", + Values: g.EnumValueConfigMap{ + "schedule": &g.EnumValueConfig{Value: assignment.TargetTypeSchedule}, + }, +}) + +func getUserOverride(src interface{}) (*override.UserOverride, error) { + switch u := src.(type) { + case *override.UserOverride: + return u, nil + case override.UserOverride: + return &u, nil + default: + return nil, fmt.Errorf("could not get UserOverride (unknown source type %T)", u) + } +} + +func (h *Handler) userOverrideFields() g.Fields { + return g.Fields{ + "id": &g.Field{Type: g.String}, + + "add_user_id": &g.Field{Type: g.String}, + "remove_user_id": &g.Field{Type: g.String}, + + "add_user_name": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + o, err := getUserOverride(p.Source) + if err != nil { + return nil, err + } + if o.AddUserID == "" { + return nil, nil + } + scrub := newScrubber(p.Context).scrub + + u, err := h.c.UserStore.FindOne(p.Context, o.AddUserID) + 
if err != nil { + return scrub(nil, err) + } + return u.Name, nil + }, + }, + "remove_user_name": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + o, err := getUserOverride(p.Source) + if err != nil { + return nil, err + } + if o.RemoveUserID == "" { + return nil, nil + } + scrub := newScrubber(p.Context).scrub + + u, err := h.c.UserStore.FindOne(p.Context, o.RemoveUserID) + if err != nil { + return scrub(nil, err) + } + return u.Name, nil + }, + }, + "start_time": &g.Field{Type: ISOTimestamp}, + "end_time": &g.Field{Type: ISOTimestamp}, + + "target_id": &g.Field{ + Type: g.String, + Resolve: func(p g.ResolveParams) (interface{}, error) { + o, err := getUserOverride(p.Source) + if err != nil { + return nil, err + } + return o.Target.TargetID(), nil + }, + }, + "target_type": &g.Field{ + Type: assignmentTargetType, + Resolve: func(p g.ResolveParams) (interface{}, error) { + o, err := getUserOverride(p.Source) + if err != nil { + return nil, err + } + return o.Target.TargetType(), nil + }, + }, + } +} + +func (h *Handler) updateUserOverrideField() *g.Field { + return &g.Field{ + Type: h.userOverride, + Args: g.FieldConfigArgument{ + "input": &g.ArgumentConfig{ + Type: g.NewInputObject(g.InputObjectConfig{ + Name: "UpdateUserOverrideInput", + Fields: g.InputObjectConfigFieldMap{ + "id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "target_id": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "target_type": &g.InputObjectFieldConfig{Type: g.NewNonNull(userOverrideTargetType)}, + + "add_user_id": &g.InputObjectFieldConfig{Type: g.String}, + "remove_user_id": &g.InputObjectFieldConfig{Type: g.String}, + + "start_time": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + "end_time": &g.InputObjectFieldConfig{Type: g.NewNonNull(g.String)}, + }, + }), + }, + }, + Resolve: func(p g.ResolveParams) (interface{}, error) { + m, ok := p.Args["input"].(map[string]interface{}) + if !ok { + return nil, 
errors.New("invalid input type") + } + scrub := newScrubber(p.Context).scrub + + var o override.UserOverride + + var tgt assignment.RawTarget + tgt.ID, _ = m["target_id"].(string) + tgt.Type, _ = m["target_type"].(assignment.TargetType) + o.Target = tgt + o.ID, _ = m["id"].(string) + o.AddUserID, _ = m["add_user_id"].(string) + o.RemoveUserID, _ = m["remove_user_id"].(string) + + startStr, _ := m["start_time"].(string) + endStr, _ := m["end_time"].(string) + + var err error + o.Start, err = time.Parse(time.RFC3339, startStr) + if err != nil { + return nil, validation.NewFieldError("Start", err.Error()) + } + o.End, err = time.Parse(time.RFC3339, endStr) + if err != nil { + return nil, validation.NewFieldError("End", err.Error()) + } + + return scrub(o, h.c.OverrideStore.UpdateUserOverride(p.Context, &o)) + }, + } +} diff --git a/graphql/util.go b/graphql/util.go new file mode 100644 index 0000000000..0b80cdca5e --- /dev/null +++ b/graphql/util.go @@ -0,0 +1,75 @@ +package graphql + +import ( + "context" + "database/sql" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/util/errutil" + "github.com/target/goalert/util/log" + "time" + + g "github.com/graphql-go/graphql" + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// ISOTimestamp is a timestamp formatted as a string in the ISO format +var ISOTimestamp = g.NewScalar(g.ScalarConfig{ + Name: "ISOTimestamp", + Description: "ISOTimestamp is a timestamp formatted as a string in the ISO format (RFC3339).", + Serialize: func(val interface{}) interface{} { + return val.(time.Time).Format(time.RFC3339Nano) + }, +}) + +// HourTime is a timestamp containing only the hour and minute +var HourTime = g.NewScalar(g.ScalarConfig{ + Name: "HourTime", + Description: "HourTime is a timestamp containing only the hour and minute.", + Serialize: func(val interface{}) interface{} { + return val.(rule.Clock).String() + }, +}) + +type scrubber struct{ ctx context.Context } + +func 
isCtxCause(err error) bool { + if err == context.Canceled { + return true + } + if err == context.DeadlineExceeded { + return true + } + if err == sql.ErrTxDone { + return true + } + + // 57014 = query_canceled + // https://www.postgresql.org/docs/9.6/static/errcodes-appendix.html + if e, ok := err.(*pq.Error); ok && e.Code == "57014" { + return true + } + + return false +} + +func newScrubber(ctx context.Context) *scrubber { return &scrubber{ctx: ctx} } +func (s *scrubber) scrub(val interface{}, err error) (interface{}, error) { + if err == nil { + return val, nil + } + cause := errors.Cause(err) + if cause == sql.ErrNoRows || (s.ctx.Err() != nil && isCtxCause(cause)) { + log.Debug(s.ctx, errors.Wrap(err, "graphql")) + return nil, nil + } + err = errutil.MapDBError(err) + orig := err + scrubbed, err := errutil.ScrubError(err) + if scrubbed { + log.Log(s.ctx, errors.Wrap(orig, "graphql")) + } else { + log.Debug(s.ctx, errors.Wrap(err, "graphql")) + } + return nil, err +} diff --git a/graphql2/clocktime.go b/graphql2/clocktime.go new file mode 100644 index 0000000000..9b8bac41d8 --- /dev/null +++ b/graphql2/clocktime.go @@ -0,0 +1,22 @@ +package graphql2 + +import ( + "github.com/target/goalert/schedule/rule" + "io" + + graphql "github.com/99designs/gqlgen/graphql" + "github.com/pkg/errors" +) + +func MarshalClockTime(c rule.Clock) graphql.Marshaler { + return graphql.WriterFunc(func(w io.Writer) { + io.WriteString(w, "\""+c.String()+"\"") + }) +} +func UnmarshalClockTime(v interface{}) (rule.Clock, error) { + str, ok := v.(string) + if !ok { + return rule.Clock(0), errors.New("ClockTime must be strings") + } + return rule.ParseClock(str) +} diff --git a/graphql2/cmtype.go b/graphql2/cmtype.go new file mode 100644 index 0000000000..df752af773 --- /dev/null +++ b/graphql2/cmtype.go @@ -0,0 +1,25 @@ +package graphql2 + +import ( + "github.com/target/goalert/user/contactmethod" + "io" + "strings" + + graphql "github.com/99designs/gqlgen/graphql" + 
"github.com/pkg/errors" +) + +func MarshalContactMethodType(t contactmethod.Type) graphql.Marshaler { + return graphql.WriterFunc(func(w io.Writer) { + io.WriteString(w, `"`+string(t)+`"`) + }) +} +func UnmarshalContactMethodType(v interface{}) (contactmethod.Type, error) { + str, ok := v.(string) + if !ok { + return "", errors.New("timestamps must be strings") + } + str = strings.Trim(str, `"`) + + return contactmethod.Type(str), nil +} diff --git a/graphql2/gen.go b/graphql2/gen.go new file mode 100644 index 0000000000..776a06feac --- /dev/null +++ b/graphql2/gen.go @@ -0,0 +1,6 @@ +package graphql2 + +//go:generate rm -f mapconfig.go +//go:generate go run ../devtools/gqlgen/gqlgen.go -config gqlgen.yml +//go:generate go run ../devtools/configparams/main.go -out mapconfig.go +//go:generate go run golang.org/x/tools/cmd/goimports -w mapconfig.go diff --git a/graphql2/generated.go b/graphql2/generated.go new file mode 100644 index 0000000000..aefccce680 --- /dev/null +++ b/graphql2/generated.go @@ -0,0 +1,16420 @@ +// Code generated by github.com/99designs/gqlgen, DO NOT EDIT. 
+ +package graphql2 + +import ( + "bytes" + "context" + "errors" + "strconv" + "sync" + "time" + + "github.com/99designs/gqlgen/graphql" + "github.com/99designs/gqlgen/graphql/introspection" + "github.com/target/goalert/alert" + "github.com/target/goalert/assignment" + "github.com/target/goalert/escalation" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/label" + "github.com/target/goalert/notification/slack" + "github.com/target/goalert/oncall" + "github.com/target/goalert/override" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/service" + "github.com/target/goalert/user" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/user/notificationrule" + "github.com/vektah/gqlparser" + "github.com/vektah/gqlparser/ast" +) + +// region ************************** generated!.gotpl ************************** + +// NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface. 
+func NewExecutableSchema(cfg Config) graphql.ExecutableSchema { + return &executableSchema{ + resolvers: cfg.Resolvers, + directives: cfg.Directives, + complexity: cfg.Complexity, + } +} + +type Config struct { + Resolvers ResolverRoot + Directives DirectiveRoot + Complexity ComplexityRoot +} + +type ResolverRoot interface { + Alert() AlertResolver + EscalationPolicy() EscalationPolicyResolver + EscalationPolicyStep() EscalationPolicyStepResolver + IntegrationKey() IntegrationKeyResolver + Mutation() MutationResolver + OnCallShift() OnCallShiftResolver + Query() QueryResolver + Rotation() RotationResolver + Schedule() ScheduleResolver + ScheduleRule() ScheduleRuleResolver + Service() ServiceResolver + Target() TargetResolver + User() UserResolver + UserNotificationRule() UserNotificationRuleResolver + UserOverride() UserOverrideResolver +} + +type DirectiveRoot struct { +} + +type ComplexityRoot struct { + Alert struct { + AlertID func(childComplexity int) int + CreatedAt func(childComplexity int) int + Details func(childComplexity int) int + ID func(childComplexity int) int + Service func(childComplexity int) int + ServiceID func(childComplexity int) int + State func(childComplexity int) int + Status func(childComplexity int) int + Summary func(childComplexity int) int + } + + AlertConnection struct { + Nodes func(childComplexity int) int + PageInfo func(childComplexity int) int + } + + AlertState struct { + LastEscalation func(childComplexity int) int + RepeatCount func(childComplexity int) int + StepNumber func(childComplexity int) int + } + + AuthSubject struct { + ProviderID func(childComplexity int) int + SubjectID func(childComplexity int) int + UserID func(childComplexity int) int + } + + AuthSubjectConnection struct { + Nodes func(childComplexity int) int + PageInfo func(childComplexity int) int + } + + ConfigValue struct { + Description func(childComplexity int) int + ID func(childComplexity int) int + Password func(childComplexity int) int + Type 
func(childComplexity int) int + Value func(childComplexity int) int + } + + EscalationPolicy struct { + AssignedTo func(childComplexity int) int + Description func(childComplexity int) int + ID func(childComplexity int) int + Name func(childComplexity int) int + Repeat func(childComplexity int) int + Steps func(childComplexity int) int + } + + EscalationPolicyConnection struct { + Nodes func(childComplexity int) int + PageInfo func(childComplexity int) int + } + + EscalationPolicyStep struct { + DelayMinutes func(childComplexity int) int + EscalationPolicy func(childComplexity int) int + ID func(childComplexity int) int + StepNumber func(childComplexity int) int + Targets func(childComplexity int) int + } + + IntegrationKey struct { + Href func(childComplexity int) int + ID func(childComplexity int) int + Name func(childComplexity int) int + ServiceID func(childComplexity int) int + Type func(childComplexity int) int + } + + Label struct { + Key func(childComplexity int) int + Value func(childComplexity int) int + } + + LabelConnection struct { + Nodes func(childComplexity int) int + PageInfo func(childComplexity int) int + } + + Mutation struct { + AddAuthSubject func(childComplexity int, input user.AuthSubject) int + CreateEscalationPolicy func(childComplexity int, input CreateEscalationPolicyInput) int + CreateEscalationPolicyStep func(childComplexity int, input CreateEscalationPolicyStepInput) int + CreateIntegrationKey func(childComplexity int, input CreateIntegrationKeyInput) int + CreateRotation func(childComplexity int, input CreateRotationInput) int + CreateSchedule func(childComplexity int, input CreateScheduleInput) int + CreateService func(childComplexity int, input CreateServiceInput) int + CreateUserContactMethod func(childComplexity int, input CreateUserContactMethodInput) int + CreateUserNotificationRule func(childComplexity int, input CreateUserNotificationRuleInput) int + CreateUserOverride func(childComplexity int, input CreateUserOverrideInput) 
int + DeleteAll func(childComplexity int, input []assignment.RawTarget) int + DeleteAuthSubject func(childComplexity int, input user.AuthSubject) int + EscalateAlerts func(childComplexity int, input []int) int + SetConfig func(childComplexity int, input []ConfigValueInput) int + SetFavorite func(childComplexity int, input SetFavoriteInput) int + SetLabel func(childComplexity int, input SetLabelInput) int + TestContactMethod func(childComplexity int, id string) int + UpdateAlerts func(childComplexity int, input UpdateAlertsInput) int + UpdateEscalationPolicy func(childComplexity int, input UpdateEscalationPolicyInput) int + UpdateEscalationPolicyStep func(childComplexity int, input UpdateEscalationPolicyStepInput) int + UpdateRotation func(childComplexity int, input UpdateRotationInput) int + UpdateSchedule func(childComplexity int, input UpdateScheduleInput) int + UpdateScheduleTarget func(childComplexity int, input ScheduleTargetInput) int + UpdateService func(childComplexity int, input UpdateServiceInput) int + UpdateUser func(childComplexity int, input UpdateUserInput) int + UpdateUserContactMethod func(childComplexity int, input UpdateUserContactMethodInput) int + UpdateUserOverride func(childComplexity int, input UpdateUserOverrideInput) int + } + + OnCallShift struct { + End func(childComplexity int) int + Start func(childComplexity int) int + Truncated func(childComplexity int) int + User func(childComplexity int) int + UserID func(childComplexity int) int + } + + PageInfo struct { + EndCursor func(childComplexity int) int + HasNextPage func(childComplexity int) int + } + + Query struct { + Alert func(childComplexity int, id int) int + Alerts func(childComplexity int, input *AlertSearchOptions) int + AuthSubjectsForProvider func(childComplexity int, first *int, after *string, providerID string) int + Config func(childComplexity int, all *bool) int + EscalationPolicies func(childComplexity int, input *EscalationPolicySearchOptions) int + EscalationPolicy 
func(childComplexity int, id string) int + IntegrationKey func(childComplexity int, id string) int + Labels func(childComplexity int, input *LabelSearchOptions) int + Rotation func(childComplexity int, id string) int + Rotations func(childComplexity int, input *RotationSearchOptions) int + Schedule func(childComplexity int, id string) int + Schedules func(childComplexity int, input *ScheduleSearchOptions) int + Service func(childComplexity int, id string) int + Services func(childComplexity int, input *ServiceSearchOptions) int + SlackChannel func(childComplexity int, id string) int + SlackChannels func(childComplexity int, input *SlackChannelSearchOptions) int + TimeZones func(childComplexity int, input *TimeZoneSearchOptions) int + User func(childComplexity int, id *string) int + UserContactMethod func(childComplexity int, id string) int + UserOverride func(childComplexity int, id string) int + UserOverrides func(childComplexity int, input *UserOverrideSearchOptions) int + Users func(childComplexity int, input *UserSearchOptions, first *int, after *string, search *string) int + } + + Rotation struct { + ActiveUserIndex func(childComplexity int) int + Description func(childComplexity int) int + ID func(childComplexity int) int + Name func(childComplexity int) int + NextHandoffTimes func(childComplexity int, num *int) int + ShiftLength func(childComplexity int) int + Start func(childComplexity int) int + TimeZone func(childComplexity int) int + Type func(childComplexity int) int + UserIDs func(childComplexity int) int + Users func(childComplexity int) int + } + + RotationConnection struct { + Nodes func(childComplexity int) int + PageInfo func(childComplexity int) int + } + + Schedule struct { + AssignedTo func(childComplexity int) int + Description func(childComplexity int) int + ID func(childComplexity int) int + Name func(childComplexity int) int + Shifts func(childComplexity int, start time.Time, end time.Time) int + Target func(childComplexity int, input 
assignment.RawTarget) int + Targets func(childComplexity int) int + TimeZone func(childComplexity int) int + } + + ScheduleConnection struct { + Nodes func(childComplexity int) int + PageInfo func(childComplexity int) int + } + + ScheduleRule struct { + End func(childComplexity int) int + ID func(childComplexity int) int + ScheduleID func(childComplexity int) int + Start func(childComplexity int) int + Target func(childComplexity int) int + WeekdayFilter func(childComplexity int) int + } + + ScheduleTarget struct { + Rules func(childComplexity int) int + ScheduleID func(childComplexity int) int + Target func(childComplexity int) int + } + + Service struct { + Description func(childComplexity int) int + EscalationPolicy func(childComplexity int) int + EscalationPolicyID func(childComplexity int) int + ID func(childComplexity int) int + IntegrationKeys func(childComplexity int) int + IsFavorite func(childComplexity int) int + Labels func(childComplexity int) int + Name func(childComplexity int) int + OnCallUsers func(childComplexity int) int + } + + ServiceConnection struct { + Nodes func(childComplexity int) int + PageInfo func(childComplexity int) int + } + + ServiceOnCallUser struct { + StepNumber func(childComplexity int) int + UserID func(childComplexity int) int + UserName func(childComplexity int) int + } + + SlackChannel struct { + ID func(childComplexity int) int + Name func(childComplexity int) int + } + + SlackChannelConnection struct { + Nodes func(childComplexity int) int + PageInfo func(childComplexity int) int + } + + Target struct { + ID func(childComplexity int) int + Name func(childComplexity int) int + Type func(childComplexity int) int + } + + TimeZone struct { + ID func(childComplexity int) int + } + + TimeZoneConnection struct { + Nodes func(childComplexity int) int + PageInfo func(childComplexity int) int + } + + User struct { + AlertStatusCMID func(childComplexity int) int + AuthSubjects func(childComplexity int) int + ContactMethods 
func(childComplexity int) int + Email func(childComplexity int) int + ID func(childComplexity int) int + Name func(childComplexity int) int + NotificationRules func(childComplexity int) int + OnCallSteps func(childComplexity int) int + Role func(childComplexity int) int + } + + UserConnection struct { + Nodes func(childComplexity int) int + PageInfo func(childComplexity int) int + } + + UserContactMethod struct { + ID func(childComplexity int) int + Name func(childComplexity int) int + Type func(childComplexity int) int + Value func(childComplexity int) int + } + + UserNotificationRule struct { + ContactMethod func(childComplexity int) int + ContactMethodID func(childComplexity int) int + DelayMinutes func(childComplexity int) int + ID func(childComplexity int) int + } + + UserOverride struct { + AddUser func(childComplexity int) int + AddUserID func(childComplexity int) int + End func(childComplexity int) int + ID func(childComplexity int) int + RemoveUser func(childComplexity int) int + RemoveUserID func(childComplexity int) int + Start func(childComplexity int) int + Target func(childComplexity int) int + } + + UserOverrideConnection struct { + Nodes func(childComplexity int) int + PageInfo func(childComplexity int) int + } +} + +type AlertResolver interface { + AlertID(ctx context.Context, obj *alert.Alert) (int, error) + Status(ctx context.Context, obj *alert.Alert) (AlertStatus, error) + + Service(ctx context.Context, obj *alert.Alert) (*service.Service, error) + State(ctx context.Context, obj *alert.Alert) (*alert.State, error) +} +type EscalationPolicyResolver interface { + AssignedTo(ctx context.Context, obj *escalation.Policy) ([]assignment.RawTarget, error) + Steps(ctx context.Context, obj *escalation.Policy) ([]escalation.Step, error) +} +type EscalationPolicyStepResolver interface { + Targets(ctx context.Context, obj *escalation.Step) ([]assignment.RawTarget, error) + EscalationPolicy(ctx context.Context, obj *escalation.Step) (*escalation.Policy, 
error) +} +type IntegrationKeyResolver interface { + Type(ctx context.Context, obj *integrationkey.IntegrationKey) (IntegrationKeyType, error) + + Href(ctx context.Context, obj *integrationkey.IntegrationKey) (string, error) +} +type MutationResolver interface { + AddAuthSubject(ctx context.Context, input user.AuthSubject) (bool, error) + DeleteAuthSubject(ctx context.Context, input user.AuthSubject) (bool, error) + UpdateUser(ctx context.Context, input UpdateUserInput) (bool, error) + TestContactMethod(ctx context.Context, id string) (bool, error) + UpdateAlerts(ctx context.Context, input UpdateAlertsInput) ([]alert.Alert, error) + UpdateRotation(ctx context.Context, input UpdateRotationInput) (bool, error) + EscalateAlerts(ctx context.Context, input []int) ([]alert.Alert, error) + SetFavorite(ctx context.Context, input SetFavoriteInput) (bool, error) + UpdateService(ctx context.Context, input UpdateServiceInput) (bool, error) + UpdateEscalationPolicy(ctx context.Context, input UpdateEscalationPolicyInput) (bool, error) + UpdateEscalationPolicyStep(ctx context.Context, input UpdateEscalationPolicyStepInput) (bool, error) + DeleteAll(ctx context.Context, input []assignment.RawTarget) (bool, error) + CreateService(ctx context.Context, input CreateServiceInput) (*service.Service, error) + CreateEscalationPolicy(ctx context.Context, input CreateEscalationPolicyInput) (*escalation.Policy, error) + CreateEscalationPolicyStep(ctx context.Context, input CreateEscalationPolicyStepInput) (*escalation.Step, error) + CreateRotation(ctx context.Context, input CreateRotationInput) (*rotation.Rotation, error) + CreateIntegrationKey(ctx context.Context, input CreateIntegrationKeyInput) (*integrationkey.IntegrationKey, error) + SetLabel(ctx context.Context, input SetLabelInput) (bool, error) + CreateSchedule(ctx context.Context, input CreateScheduleInput) (*schedule.Schedule, error) + UpdateScheduleTarget(ctx context.Context, input ScheduleTargetInput) (bool, error) + 
CreateUserOverride(ctx context.Context, input CreateUserOverrideInput) (*override.UserOverride, error) + CreateUserContactMethod(ctx context.Context, input CreateUserContactMethodInput) (*contactmethod.ContactMethod, error) + CreateUserNotificationRule(ctx context.Context, input CreateUserNotificationRuleInput) (*notificationrule.NotificationRule, error) + UpdateUserContactMethod(ctx context.Context, input UpdateUserContactMethodInput) (bool, error) + UpdateSchedule(ctx context.Context, input UpdateScheduleInput) (bool, error) + UpdateUserOverride(ctx context.Context, input UpdateUserOverrideInput) (bool, error) + SetConfig(ctx context.Context, input []ConfigValueInput) (bool, error) +} +type OnCallShiftResolver interface { + User(ctx context.Context, obj *oncall.Shift) (*user.User, error) +} +type QueryResolver interface { + User(ctx context.Context, id *string) (*user.User, error) + Users(ctx context.Context, input *UserSearchOptions, first *int, after *string, search *string) (*UserConnection, error) + Alert(ctx context.Context, id int) (*alert.Alert, error) + Alerts(ctx context.Context, input *AlertSearchOptions) (*AlertConnection, error) + Service(ctx context.Context, id string) (*service.Service, error) + IntegrationKey(ctx context.Context, id string) (*integrationkey.IntegrationKey, error) + Services(ctx context.Context, input *ServiceSearchOptions) (*ServiceConnection, error) + Rotation(ctx context.Context, id string) (*rotation.Rotation, error) + Rotations(ctx context.Context, input *RotationSearchOptions) (*RotationConnection, error) + Schedule(ctx context.Context, id string) (*schedule.Schedule, error) + Schedules(ctx context.Context, input *ScheduleSearchOptions) (*ScheduleConnection, error) + EscalationPolicy(ctx context.Context, id string) (*escalation.Policy, error) + EscalationPolicies(ctx context.Context, input *EscalationPolicySearchOptions) (*EscalationPolicyConnection, error) + AuthSubjectsForProvider(ctx context.Context, first *int, after 
*string, providerID string) (*AuthSubjectConnection, error) + TimeZones(ctx context.Context, input *TimeZoneSearchOptions) (*TimeZoneConnection, error) + Labels(ctx context.Context, input *LabelSearchOptions) (*LabelConnection, error) + UserOverrides(ctx context.Context, input *UserOverrideSearchOptions) (*UserOverrideConnection, error) + UserOverride(ctx context.Context, id string) (*override.UserOverride, error) + Config(ctx context.Context, all *bool) ([]ConfigValue, error) + UserContactMethod(ctx context.Context, id string) (*contactmethod.ContactMethod, error) + SlackChannels(ctx context.Context, input *SlackChannelSearchOptions) (*SlackChannelConnection, error) + SlackChannel(ctx context.Context, id string) (*slack.Channel, error) +} +type RotationResolver interface { + TimeZone(ctx context.Context, obj *rotation.Rotation) (string, error) + + ActiveUserIndex(ctx context.Context, obj *rotation.Rotation) (int, error) + UserIDs(ctx context.Context, obj *rotation.Rotation) ([]string, error) + Users(ctx context.Context, obj *rotation.Rotation) ([]user.User, error) + NextHandoffTimes(ctx context.Context, obj *rotation.Rotation, num *int) ([]time.Time, error) +} +type ScheduleResolver interface { + TimeZone(ctx context.Context, obj *schedule.Schedule) (string, error) + AssignedTo(ctx context.Context, obj *schedule.Schedule) ([]assignment.RawTarget, error) + Shifts(ctx context.Context, obj *schedule.Schedule, start time.Time, end time.Time) ([]oncall.Shift, error) + Targets(ctx context.Context, obj *schedule.Schedule) ([]ScheduleTarget, error) + Target(ctx context.Context, obj *schedule.Schedule, input assignment.RawTarget) (*ScheduleTarget, error) +} +type ScheduleRuleResolver interface { + WeekdayFilter(ctx context.Context, obj *rule.Rule) ([]bool, error) + Target(ctx context.Context, obj *rule.Rule) (*assignment.RawTarget, error) +} +type ServiceResolver interface { + EscalationPolicy(ctx context.Context, obj *service.Service) (*escalation.Policy, error) + 
IsFavorite(ctx context.Context, obj *service.Service) (bool, error) + OnCallUsers(ctx context.Context, obj *service.Service) ([]oncall.ServiceOnCallUser, error) + IntegrationKeys(ctx context.Context, obj *service.Service) ([]integrationkey.IntegrationKey, error) + Labels(ctx context.Context, obj *service.Service) ([]label.Label, error) +} +type TargetResolver interface { + Name(ctx context.Context, obj *assignment.RawTarget) (*string, error) +} +type UserResolver interface { + Role(ctx context.Context, obj *user.User) (UserRole, error) + + ContactMethods(ctx context.Context, obj *user.User) ([]contactmethod.ContactMethod, error) + NotificationRules(ctx context.Context, obj *user.User) ([]notificationrule.NotificationRule, error) + + AuthSubjects(ctx context.Context, obj *user.User) ([]user.AuthSubject, error) + OnCallSteps(ctx context.Context, obj *user.User) ([]escalation.Step, error) +} +type UserNotificationRuleResolver interface { + ContactMethod(ctx context.Context, obj *notificationrule.NotificationRule) (*contactmethod.ContactMethod, error) +} +type UserOverrideResolver interface { + AddUser(ctx context.Context, obj *override.UserOverride) (*user.User, error) + RemoveUser(ctx context.Context, obj *override.UserOverride) (*user.User, error) + Target(ctx context.Context, obj *override.UserOverride) (*assignment.RawTarget, error) +} + +type executableSchema struct { + resolvers ResolverRoot + directives DirectiveRoot + complexity ComplexityRoot +} + +func (e *executableSchema) Schema() *ast.Schema { + return parsedSchema +} + +func (e *executableSchema) Complexity(typeName, field string, childComplexity int, rawArgs map[string]interface{}) (int, bool) { + ec := executionContext{nil, e} + _ = ec + switch typeName + "." 
+ field { + + case "Alert.AlertID": + if e.complexity.Alert.AlertID == nil { + break + } + + return e.complexity.Alert.AlertID(childComplexity), true + + case "Alert.CreatedAt": + if e.complexity.Alert.CreatedAt == nil { + break + } + + return e.complexity.Alert.CreatedAt(childComplexity), true + + case "Alert.Details": + if e.complexity.Alert.Details == nil { + break + } + + return e.complexity.Alert.Details(childComplexity), true + + case "Alert.ID": + if e.complexity.Alert.ID == nil { + break + } + + return e.complexity.Alert.ID(childComplexity), true + + case "Alert.Service": + if e.complexity.Alert.Service == nil { + break + } + + return e.complexity.Alert.Service(childComplexity), true + + case "Alert.ServiceID": + if e.complexity.Alert.ServiceID == nil { + break + } + + return e.complexity.Alert.ServiceID(childComplexity), true + + case "Alert.State": + if e.complexity.Alert.State == nil { + break + } + + return e.complexity.Alert.State(childComplexity), true + + case "Alert.Status": + if e.complexity.Alert.Status == nil { + break + } + + return e.complexity.Alert.Status(childComplexity), true + + case "Alert.Summary": + if e.complexity.Alert.Summary == nil { + break + } + + return e.complexity.Alert.Summary(childComplexity), true + + case "AlertConnection.Nodes": + if e.complexity.AlertConnection.Nodes == nil { + break + } + + return e.complexity.AlertConnection.Nodes(childComplexity), true + + case "AlertConnection.PageInfo": + if e.complexity.AlertConnection.PageInfo == nil { + break + } + + return e.complexity.AlertConnection.PageInfo(childComplexity), true + + case "AlertState.LastEscalation": + if e.complexity.AlertState.LastEscalation == nil { + break + } + + return e.complexity.AlertState.LastEscalation(childComplexity), true + + case "AlertState.RepeatCount": + if e.complexity.AlertState.RepeatCount == nil { + break + } + + return e.complexity.AlertState.RepeatCount(childComplexity), true + + case "AlertState.StepNumber": + if 
e.complexity.AlertState.StepNumber == nil { + break + } + + return e.complexity.AlertState.StepNumber(childComplexity), true + + case "AuthSubject.ProviderID": + if e.complexity.AuthSubject.ProviderID == nil { + break + } + + return e.complexity.AuthSubject.ProviderID(childComplexity), true + + case "AuthSubject.SubjectID": + if e.complexity.AuthSubject.SubjectID == nil { + break + } + + return e.complexity.AuthSubject.SubjectID(childComplexity), true + + case "AuthSubject.UserID": + if e.complexity.AuthSubject.UserID == nil { + break + } + + return e.complexity.AuthSubject.UserID(childComplexity), true + + case "AuthSubjectConnection.Nodes": + if e.complexity.AuthSubjectConnection.Nodes == nil { + break + } + + return e.complexity.AuthSubjectConnection.Nodes(childComplexity), true + + case "AuthSubjectConnection.PageInfo": + if e.complexity.AuthSubjectConnection.PageInfo == nil { + break + } + + return e.complexity.AuthSubjectConnection.PageInfo(childComplexity), true + + case "ConfigValue.Description": + if e.complexity.ConfigValue.Description == nil { + break + } + + return e.complexity.ConfigValue.Description(childComplexity), true + + case "ConfigValue.ID": + if e.complexity.ConfigValue.ID == nil { + break + } + + return e.complexity.ConfigValue.ID(childComplexity), true + + case "ConfigValue.Password": + if e.complexity.ConfigValue.Password == nil { + break + } + + return e.complexity.ConfigValue.Password(childComplexity), true + + case "ConfigValue.Type": + if e.complexity.ConfigValue.Type == nil { + break + } + + return e.complexity.ConfigValue.Type(childComplexity), true + + case "ConfigValue.Value": + if e.complexity.ConfigValue.Value == nil { + break + } + + return e.complexity.ConfigValue.Value(childComplexity), true + + case "EscalationPolicy.AssignedTo": + if e.complexity.EscalationPolicy.AssignedTo == nil { + break + } + + return e.complexity.EscalationPolicy.AssignedTo(childComplexity), true + + case "EscalationPolicy.Description": + if 
e.complexity.EscalationPolicy.Description == nil { + break + } + + return e.complexity.EscalationPolicy.Description(childComplexity), true + + case "EscalationPolicy.ID": + if e.complexity.EscalationPolicy.ID == nil { + break + } + + return e.complexity.EscalationPolicy.ID(childComplexity), true + + case "EscalationPolicy.Name": + if e.complexity.EscalationPolicy.Name == nil { + break + } + + return e.complexity.EscalationPolicy.Name(childComplexity), true + + case "EscalationPolicy.Repeat": + if e.complexity.EscalationPolicy.Repeat == nil { + break + } + + return e.complexity.EscalationPolicy.Repeat(childComplexity), true + + case "EscalationPolicy.Steps": + if e.complexity.EscalationPolicy.Steps == nil { + break + } + + return e.complexity.EscalationPolicy.Steps(childComplexity), true + + case "EscalationPolicyConnection.Nodes": + if e.complexity.EscalationPolicyConnection.Nodes == nil { + break + } + + return e.complexity.EscalationPolicyConnection.Nodes(childComplexity), true + + case "EscalationPolicyConnection.PageInfo": + if e.complexity.EscalationPolicyConnection.PageInfo == nil { + break + } + + return e.complexity.EscalationPolicyConnection.PageInfo(childComplexity), true + + case "EscalationPolicyStep.DelayMinutes": + if e.complexity.EscalationPolicyStep.DelayMinutes == nil { + break + } + + return e.complexity.EscalationPolicyStep.DelayMinutes(childComplexity), true + + case "EscalationPolicyStep.EscalationPolicy": + if e.complexity.EscalationPolicyStep.EscalationPolicy == nil { + break + } + + return e.complexity.EscalationPolicyStep.EscalationPolicy(childComplexity), true + + case "EscalationPolicyStep.ID": + if e.complexity.EscalationPolicyStep.ID == nil { + break + } + + return e.complexity.EscalationPolicyStep.ID(childComplexity), true + + case "EscalationPolicyStep.StepNumber": + if e.complexity.EscalationPolicyStep.StepNumber == nil { + break + } + + return e.complexity.EscalationPolicyStep.StepNumber(childComplexity), true + + case 
"EscalationPolicyStep.Targets": + if e.complexity.EscalationPolicyStep.Targets == nil { + break + } + + return e.complexity.EscalationPolicyStep.Targets(childComplexity), true + + case "IntegrationKey.Href": + if e.complexity.IntegrationKey.Href == nil { + break + } + + return e.complexity.IntegrationKey.Href(childComplexity), true + + case "IntegrationKey.ID": + if e.complexity.IntegrationKey.ID == nil { + break + } + + return e.complexity.IntegrationKey.ID(childComplexity), true + + case "IntegrationKey.Name": + if e.complexity.IntegrationKey.Name == nil { + break + } + + return e.complexity.IntegrationKey.Name(childComplexity), true + + case "IntegrationKey.ServiceID": + if e.complexity.IntegrationKey.ServiceID == nil { + break + } + + return e.complexity.IntegrationKey.ServiceID(childComplexity), true + + case "IntegrationKey.Type": + if e.complexity.IntegrationKey.Type == nil { + break + } + + return e.complexity.IntegrationKey.Type(childComplexity), true + + case "Label.Key": + if e.complexity.Label.Key == nil { + break + } + + return e.complexity.Label.Key(childComplexity), true + + case "Label.Value": + if e.complexity.Label.Value == nil { + break + } + + return e.complexity.Label.Value(childComplexity), true + + case "LabelConnection.Nodes": + if e.complexity.LabelConnection.Nodes == nil { + break + } + + return e.complexity.LabelConnection.Nodes(childComplexity), true + + case "LabelConnection.PageInfo": + if e.complexity.LabelConnection.PageInfo == nil { + break + } + + return e.complexity.LabelConnection.PageInfo(childComplexity), true + + case "Mutation.AddAuthSubject": + if e.complexity.Mutation.AddAuthSubject == nil { + break + } + + args, err := ec.field_Mutation_addAuthSubject_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.AddAuthSubject(childComplexity, args["input"].(user.AuthSubject)), true + + case "Mutation.CreateEscalationPolicy": + if e.complexity.Mutation.CreateEscalationPolicy == nil 
{ + break + } + + args, err := ec.field_Mutation_createEscalationPolicy_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.CreateEscalationPolicy(childComplexity, args["input"].(CreateEscalationPolicyInput)), true + + case "Mutation.CreateEscalationPolicyStep": + if e.complexity.Mutation.CreateEscalationPolicyStep == nil { + break + } + + args, err := ec.field_Mutation_createEscalationPolicyStep_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.CreateEscalationPolicyStep(childComplexity, args["input"].(CreateEscalationPolicyStepInput)), true + + case "Mutation.CreateIntegrationKey": + if e.complexity.Mutation.CreateIntegrationKey == nil { + break + } + + args, err := ec.field_Mutation_createIntegrationKey_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.CreateIntegrationKey(childComplexity, args["input"].(CreateIntegrationKeyInput)), true + + case "Mutation.CreateRotation": + if e.complexity.Mutation.CreateRotation == nil { + break + } + + args, err := ec.field_Mutation_createRotation_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.CreateRotation(childComplexity, args["input"].(CreateRotationInput)), true + + case "Mutation.CreateSchedule": + if e.complexity.Mutation.CreateSchedule == nil { + break + } + + args, err := ec.field_Mutation_createSchedule_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.CreateSchedule(childComplexity, args["input"].(CreateScheduleInput)), true + + case "Mutation.CreateService": + if e.complexity.Mutation.CreateService == nil { + break + } + + args, err := ec.field_Mutation_createService_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.CreateService(childComplexity, args["input"].(CreateServiceInput)), true + + case 
"Mutation.CreateUserContactMethod": + if e.complexity.Mutation.CreateUserContactMethod == nil { + break + } + + args, err := ec.field_Mutation_createUserContactMethod_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.CreateUserContactMethod(childComplexity, args["input"].(CreateUserContactMethodInput)), true + + case "Mutation.CreateUserNotificationRule": + if e.complexity.Mutation.CreateUserNotificationRule == nil { + break + } + + args, err := ec.field_Mutation_createUserNotificationRule_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.CreateUserNotificationRule(childComplexity, args["input"].(CreateUserNotificationRuleInput)), true + + case "Mutation.CreateUserOverride": + if e.complexity.Mutation.CreateUserOverride == nil { + break + } + + args, err := ec.field_Mutation_createUserOverride_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.CreateUserOverride(childComplexity, args["input"].(CreateUserOverrideInput)), true + + case "Mutation.DeleteAll": + if e.complexity.Mutation.DeleteAll == nil { + break + } + + args, err := ec.field_Mutation_deleteAll_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.DeleteAll(childComplexity, args["input"].([]assignment.RawTarget)), true + + case "Mutation.DeleteAuthSubject": + if e.complexity.Mutation.DeleteAuthSubject == nil { + break + } + + args, err := ec.field_Mutation_deleteAuthSubject_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.DeleteAuthSubject(childComplexity, args["input"].(user.AuthSubject)), true + + case "Mutation.EscalateAlerts": + if e.complexity.Mutation.EscalateAlerts == nil { + break + } + + args, err := ec.field_Mutation_escalateAlerts_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return 
e.complexity.Mutation.EscalateAlerts(childComplexity, args["input"].([]int)), true + + case "Mutation.SetConfig": + if e.complexity.Mutation.SetConfig == nil { + break + } + + args, err := ec.field_Mutation_setConfig_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.SetConfig(childComplexity, args["input"].([]ConfigValueInput)), true + + case "Mutation.SetFavorite": + if e.complexity.Mutation.SetFavorite == nil { + break + } + + args, err := ec.field_Mutation_setFavorite_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.SetFavorite(childComplexity, args["input"].(SetFavoriteInput)), true + + case "Mutation.SetLabel": + if e.complexity.Mutation.SetLabel == nil { + break + } + + args, err := ec.field_Mutation_setLabel_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.SetLabel(childComplexity, args["input"].(SetLabelInput)), true + + case "Mutation.TestContactMethod": + if e.complexity.Mutation.TestContactMethod == nil { + break + } + + args, err := ec.field_Mutation_testContactMethod_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.TestContactMethod(childComplexity, args["id"].(string)), true + + case "Mutation.UpdateAlerts": + if e.complexity.Mutation.UpdateAlerts == nil { + break + } + + args, err := ec.field_Mutation_updateAlerts_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateAlerts(childComplexity, args["input"].(UpdateAlertsInput)), true + + case "Mutation.UpdateEscalationPolicy": + if e.complexity.Mutation.UpdateEscalationPolicy == nil { + break + } + + args, err := ec.field_Mutation_updateEscalationPolicy_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateEscalationPolicy(childComplexity, args["input"].(UpdateEscalationPolicyInput)), 
true + + case "Mutation.UpdateEscalationPolicyStep": + if e.complexity.Mutation.UpdateEscalationPolicyStep == nil { + break + } + + args, err := ec.field_Mutation_updateEscalationPolicyStep_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateEscalationPolicyStep(childComplexity, args["input"].(UpdateEscalationPolicyStepInput)), true + + case "Mutation.UpdateRotation": + if e.complexity.Mutation.UpdateRotation == nil { + break + } + + args, err := ec.field_Mutation_updateRotation_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateRotation(childComplexity, args["input"].(UpdateRotationInput)), true + + case "Mutation.UpdateSchedule": + if e.complexity.Mutation.UpdateSchedule == nil { + break + } + + args, err := ec.field_Mutation_updateSchedule_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateSchedule(childComplexity, args["input"].(UpdateScheduleInput)), true + + case "Mutation.UpdateScheduleTarget": + if e.complexity.Mutation.UpdateScheduleTarget == nil { + break + } + + args, err := ec.field_Mutation_updateScheduleTarget_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateScheduleTarget(childComplexity, args["input"].(ScheduleTargetInput)), true + + case "Mutation.UpdateService": + if e.complexity.Mutation.UpdateService == nil { + break + } + + args, err := ec.field_Mutation_updateService_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateService(childComplexity, args["input"].(UpdateServiceInput)), true + + case "Mutation.UpdateUser": + if e.complexity.Mutation.UpdateUser == nil { + break + } + + args, err := ec.field_Mutation_updateUser_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateUser(childComplexity, 
args["input"].(UpdateUserInput)), true + + case "Mutation.UpdateUserContactMethod": + if e.complexity.Mutation.UpdateUserContactMethod == nil { + break + } + + args, err := ec.field_Mutation_updateUserContactMethod_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateUserContactMethod(childComplexity, args["input"].(UpdateUserContactMethodInput)), true + + case "Mutation.UpdateUserOverride": + if e.complexity.Mutation.UpdateUserOverride == nil { + break + } + + args, err := ec.field_Mutation_updateUserOverride_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateUserOverride(childComplexity, args["input"].(UpdateUserOverrideInput)), true + + case "OnCallShift.End": + if e.complexity.OnCallShift.End == nil { + break + } + + return e.complexity.OnCallShift.End(childComplexity), true + + case "OnCallShift.Start": + if e.complexity.OnCallShift.Start == nil { + break + } + + return e.complexity.OnCallShift.Start(childComplexity), true + + case "OnCallShift.Truncated": + if e.complexity.OnCallShift.Truncated == nil { + break + } + + return e.complexity.OnCallShift.Truncated(childComplexity), true + + case "OnCallShift.User": + if e.complexity.OnCallShift.User == nil { + break + } + + return e.complexity.OnCallShift.User(childComplexity), true + + case "OnCallShift.UserID": + if e.complexity.OnCallShift.UserID == nil { + break + } + + return e.complexity.OnCallShift.UserID(childComplexity), true + + case "PageInfo.EndCursor": + if e.complexity.PageInfo.EndCursor == nil { + break + } + + return e.complexity.PageInfo.EndCursor(childComplexity), true + + case "PageInfo.HasNextPage": + if e.complexity.PageInfo.HasNextPage == nil { + break + } + + return e.complexity.PageInfo.HasNextPage(childComplexity), true + + case "Query.Alert": + if e.complexity.Query.Alert == nil { + break + } + + args, err := ec.field_Query_alert_args(context.TODO(), rawArgs) + if err != nil 
{ + return 0, false + } + + return e.complexity.Query.Alert(childComplexity, args["id"].(int)), true + + case "Query.Alerts": + if e.complexity.Query.Alerts == nil { + break + } + + args, err := ec.field_Query_alerts_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Alerts(childComplexity, args["input"].(*AlertSearchOptions)), true + + case "Query.AuthSubjectsForProvider": + if e.complexity.Query.AuthSubjectsForProvider == nil { + break + } + + args, err := ec.field_Query_authSubjectsForProvider_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.AuthSubjectsForProvider(childComplexity, args["first"].(*int), args["after"].(*string), args["providerID"].(string)), true + + case "Query.Config": + if e.complexity.Query.Config == nil { + break + } + + args, err := ec.field_Query_config_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Config(childComplexity, args["all"].(*bool)), true + + case "Query.EscalationPolicies": + if e.complexity.Query.EscalationPolicies == nil { + break + } + + args, err := ec.field_Query_escalationPolicies_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.EscalationPolicies(childComplexity, args["input"].(*EscalationPolicySearchOptions)), true + + case "Query.EscalationPolicy": + if e.complexity.Query.EscalationPolicy == nil { + break + } + + args, err := ec.field_Query_escalationPolicy_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.EscalationPolicy(childComplexity, args["id"].(string)), true + + case "Query.IntegrationKey": + if e.complexity.Query.IntegrationKey == nil { + break + } + + args, err := ec.field_Query_integrationKey_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.IntegrationKey(childComplexity, args["id"].(string)), true + + case 
"Query.Labels": + if e.complexity.Query.Labels == nil { + break + } + + args, err := ec.field_Query_labels_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Labels(childComplexity, args["input"].(*LabelSearchOptions)), true + + case "Query.Rotation": + if e.complexity.Query.Rotation == nil { + break + } + + args, err := ec.field_Query_rotation_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Rotation(childComplexity, args["id"].(string)), true + + case "Query.Rotations": + if e.complexity.Query.Rotations == nil { + break + } + + args, err := ec.field_Query_rotations_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Rotations(childComplexity, args["input"].(*RotationSearchOptions)), true + + case "Query.Schedule": + if e.complexity.Query.Schedule == nil { + break + } + + args, err := ec.field_Query_schedule_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Schedule(childComplexity, args["id"].(string)), true + + case "Query.Schedules": + if e.complexity.Query.Schedules == nil { + break + } + + args, err := ec.field_Query_schedules_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Schedules(childComplexity, args["input"].(*ScheduleSearchOptions)), true + + case "Query.Service": + if e.complexity.Query.Service == nil { + break + } + + args, err := ec.field_Query_service_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Service(childComplexity, args["id"].(string)), true + + case "Query.Services": + if e.complexity.Query.Services == nil { + break + } + + args, err := ec.field_Query_services_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Services(childComplexity, args["input"].(*ServiceSearchOptions)), true + + case 
"Query.SlackChannel": + if e.complexity.Query.SlackChannel == nil { + break + } + + args, err := ec.field_Query_slackChannel_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.SlackChannel(childComplexity, args["id"].(string)), true + + case "Query.SlackChannels": + if e.complexity.Query.SlackChannels == nil { + break + } + + args, err := ec.field_Query_slackChannels_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.SlackChannels(childComplexity, args["input"].(*SlackChannelSearchOptions)), true + + case "Query.TimeZones": + if e.complexity.Query.TimeZones == nil { + break + } + + args, err := ec.field_Query_timeZones_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.TimeZones(childComplexity, args["input"].(*TimeZoneSearchOptions)), true + + case "Query.User": + if e.complexity.Query.User == nil { + break + } + + args, err := ec.field_Query_user_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.User(childComplexity, args["id"].(*string)), true + + case "Query.UserContactMethod": + if e.complexity.Query.UserContactMethod == nil { + break + } + + args, err := ec.field_Query_userContactMethod_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.UserContactMethod(childComplexity, args["id"].(string)), true + + case "Query.UserOverride": + if e.complexity.Query.UserOverride == nil { + break + } + + args, err := ec.field_Query_userOverride_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.UserOverride(childComplexity, args["id"].(string)), true + + case "Query.UserOverrides": + if e.complexity.Query.UserOverrides == nil { + break + } + + args, err := ec.field_Query_userOverrides_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return 
e.complexity.Query.UserOverrides(childComplexity, args["input"].(*UserOverrideSearchOptions)), true + + case "Query.Users": + if e.complexity.Query.Users == nil { + break + } + + args, err := ec.field_Query_users_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Users(childComplexity, args["input"].(*UserSearchOptions), args["first"].(*int), args["after"].(*string), args["search"].(*string)), true + + case "Rotation.ActiveUserIndex": + if e.complexity.Rotation.ActiveUserIndex == nil { + break + } + + return e.complexity.Rotation.ActiveUserIndex(childComplexity), true + + case "Rotation.Description": + if e.complexity.Rotation.Description == nil { + break + } + + return e.complexity.Rotation.Description(childComplexity), true + + case "Rotation.ID": + if e.complexity.Rotation.ID == nil { + break + } + + return e.complexity.Rotation.ID(childComplexity), true + + case "Rotation.Name": + if e.complexity.Rotation.Name == nil { + break + } + + return e.complexity.Rotation.Name(childComplexity), true + + case "Rotation.NextHandoffTimes": + if e.complexity.Rotation.NextHandoffTimes == nil { + break + } + + args, err := ec.field_Rotation_nextHandoffTimes_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Rotation.NextHandoffTimes(childComplexity, args["num"].(*int)), true + + case "Rotation.ShiftLength": + if e.complexity.Rotation.ShiftLength == nil { + break + } + + return e.complexity.Rotation.ShiftLength(childComplexity), true + + case "Rotation.Start": + if e.complexity.Rotation.Start == nil { + break + } + + return e.complexity.Rotation.Start(childComplexity), true + + case "Rotation.TimeZone": + if e.complexity.Rotation.TimeZone == nil { + break + } + + return e.complexity.Rotation.TimeZone(childComplexity), true + + case "Rotation.Type": + if e.complexity.Rotation.Type == nil { + break + } + + return e.complexity.Rotation.Type(childComplexity), true + + case 
"Rotation.UserIDs": + if e.complexity.Rotation.UserIDs == nil { + break + } + + return e.complexity.Rotation.UserIDs(childComplexity), true + + case "Rotation.Users": + if e.complexity.Rotation.Users == nil { + break + } + + return e.complexity.Rotation.Users(childComplexity), true + + case "RotationConnection.Nodes": + if e.complexity.RotationConnection.Nodes == nil { + break + } + + return e.complexity.RotationConnection.Nodes(childComplexity), true + + case "RotationConnection.PageInfo": + if e.complexity.RotationConnection.PageInfo == nil { + break + } + + return e.complexity.RotationConnection.PageInfo(childComplexity), true + + case "Schedule.AssignedTo": + if e.complexity.Schedule.AssignedTo == nil { + break + } + + return e.complexity.Schedule.AssignedTo(childComplexity), true + + case "Schedule.Description": + if e.complexity.Schedule.Description == nil { + break + } + + return e.complexity.Schedule.Description(childComplexity), true + + case "Schedule.ID": + if e.complexity.Schedule.ID == nil { + break + } + + return e.complexity.Schedule.ID(childComplexity), true + + case "Schedule.Name": + if e.complexity.Schedule.Name == nil { + break + } + + return e.complexity.Schedule.Name(childComplexity), true + + case "Schedule.Shifts": + if e.complexity.Schedule.Shifts == nil { + break + } + + args, err := ec.field_Schedule_shifts_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Schedule.Shifts(childComplexity, args["start"].(time.Time), args["end"].(time.Time)), true + + case "Schedule.Target": + if e.complexity.Schedule.Target == nil { + break + } + + args, err := ec.field_Schedule_target_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Schedule.Target(childComplexity, args["input"].(assignment.RawTarget)), true + + case "Schedule.Targets": + if e.complexity.Schedule.Targets == nil { + break + } + + return e.complexity.Schedule.Targets(childComplexity), true + + case 
"Schedule.TimeZone": + if e.complexity.Schedule.TimeZone == nil { + break + } + + return e.complexity.Schedule.TimeZone(childComplexity), true + + case "ScheduleConnection.Nodes": + if e.complexity.ScheduleConnection.Nodes == nil { + break + } + + return e.complexity.ScheduleConnection.Nodes(childComplexity), true + + case "ScheduleConnection.PageInfo": + if e.complexity.ScheduleConnection.PageInfo == nil { + break + } + + return e.complexity.ScheduleConnection.PageInfo(childComplexity), true + + case "ScheduleRule.End": + if e.complexity.ScheduleRule.End == nil { + break + } + + return e.complexity.ScheduleRule.End(childComplexity), true + + case "ScheduleRule.ID": + if e.complexity.ScheduleRule.ID == nil { + break + } + + return e.complexity.ScheduleRule.ID(childComplexity), true + + case "ScheduleRule.ScheduleID": + if e.complexity.ScheduleRule.ScheduleID == nil { + break + } + + return e.complexity.ScheduleRule.ScheduleID(childComplexity), true + + case "ScheduleRule.Start": + if e.complexity.ScheduleRule.Start == nil { + break + } + + return e.complexity.ScheduleRule.Start(childComplexity), true + + case "ScheduleRule.Target": + if e.complexity.ScheduleRule.Target == nil { + break + } + + return e.complexity.ScheduleRule.Target(childComplexity), true + + case "ScheduleRule.WeekdayFilter": + if e.complexity.ScheduleRule.WeekdayFilter == nil { + break + } + + return e.complexity.ScheduleRule.WeekdayFilter(childComplexity), true + + case "ScheduleTarget.Rules": + if e.complexity.ScheduleTarget.Rules == nil { + break + } + + return e.complexity.ScheduleTarget.Rules(childComplexity), true + + case "ScheduleTarget.ScheduleID": + if e.complexity.ScheduleTarget.ScheduleID == nil { + break + } + + return e.complexity.ScheduleTarget.ScheduleID(childComplexity), true + + case "ScheduleTarget.Target": + if e.complexity.ScheduleTarget.Target == nil { + break + } + + return e.complexity.ScheduleTarget.Target(childComplexity), true + + case "Service.Description": + if 
e.complexity.Service.Description == nil { + break + } + + return e.complexity.Service.Description(childComplexity), true + + case "Service.EscalationPolicy": + if e.complexity.Service.EscalationPolicy == nil { + break + } + + return e.complexity.Service.EscalationPolicy(childComplexity), true + + case "Service.EscalationPolicyID": + if e.complexity.Service.EscalationPolicyID == nil { + break + } + + return e.complexity.Service.EscalationPolicyID(childComplexity), true + + case "Service.ID": + if e.complexity.Service.ID == nil { + break + } + + return e.complexity.Service.ID(childComplexity), true + + case "Service.IntegrationKeys": + if e.complexity.Service.IntegrationKeys == nil { + break + } + + return e.complexity.Service.IntegrationKeys(childComplexity), true + + case "Service.IsFavorite": + if e.complexity.Service.IsFavorite == nil { + break + } + + return e.complexity.Service.IsFavorite(childComplexity), true + + case "Service.Labels": + if e.complexity.Service.Labels == nil { + break + } + + return e.complexity.Service.Labels(childComplexity), true + + case "Service.Name": + if e.complexity.Service.Name == nil { + break + } + + return e.complexity.Service.Name(childComplexity), true + + case "Service.OnCallUsers": + if e.complexity.Service.OnCallUsers == nil { + break + } + + return e.complexity.Service.OnCallUsers(childComplexity), true + + case "ServiceConnection.Nodes": + if e.complexity.ServiceConnection.Nodes == nil { + break + } + + return e.complexity.ServiceConnection.Nodes(childComplexity), true + + case "ServiceConnection.PageInfo": + if e.complexity.ServiceConnection.PageInfo == nil { + break + } + + return e.complexity.ServiceConnection.PageInfo(childComplexity), true + + case "ServiceOnCallUser.StepNumber": + if e.complexity.ServiceOnCallUser.StepNumber == nil { + break + } + + return e.complexity.ServiceOnCallUser.StepNumber(childComplexity), true + + case "ServiceOnCallUser.UserID": + if e.complexity.ServiceOnCallUser.UserID == nil { + break + 
} + + return e.complexity.ServiceOnCallUser.UserID(childComplexity), true + + case "ServiceOnCallUser.UserName": + if e.complexity.ServiceOnCallUser.UserName == nil { + break + } + + return e.complexity.ServiceOnCallUser.UserName(childComplexity), true + + case "SlackChannel.ID": + if e.complexity.SlackChannel.ID == nil { + break + } + + return e.complexity.SlackChannel.ID(childComplexity), true + + case "SlackChannel.Name": + if e.complexity.SlackChannel.Name == nil { + break + } + + return e.complexity.SlackChannel.Name(childComplexity), true + + case "SlackChannelConnection.Nodes": + if e.complexity.SlackChannelConnection.Nodes == nil { + break + } + + return e.complexity.SlackChannelConnection.Nodes(childComplexity), true + + case "SlackChannelConnection.PageInfo": + if e.complexity.SlackChannelConnection.PageInfo == nil { + break + } + + return e.complexity.SlackChannelConnection.PageInfo(childComplexity), true + + case "Target.ID": + if e.complexity.Target.ID == nil { + break + } + + return e.complexity.Target.ID(childComplexity), true + + case "Target.Name": + if e.complexity.Target.Name == nil { + break + } + + return e.complexity.Target.Name(childComplexity), true + + case "Target.Type": + if e.complexity.Target.Type == nil { + break + } + + return e.complexity.Target.Type(childComplexity), true + + case "TimeZone.ID": + if e.complexity.TimeZone.ID == nil { + break + } + + return e.complexity.TimeZone.ID(childComplexity), true + + case "TimeZoneConnection.Nodes": + if e.complexity.TimeZoneConnection.Nodes == nil { + break + } + + return e.complexity.TimeZoneConnection.Nodes(childComplexity), true + + case "TimeZoneConnection.PageInfo": + if e.complexity.TimeZoneConnection.PageInfo == nil { + break + } + + return e.complexity.TimeZoneConnection.PageInfo(childComplexity), true + + case "User.AlertStatusCMID": + if e.complexity.User.AlertStatusCMID == nil { + break + } + + return e.complexity.User.AlertStatusCMID(childComplexity), true + + case 
"User.AuthSubjects": + if e.complexity.User.AuthSubjects == nil { + break + } + + return e.complexity.User.AuthSubjects(childComplexity), true + + case "User.ContactMethods": + if e.complexity.User.ContactMethods == nil { + break + } + + return e.complexity.User.ContactMethods(childComplexity), true + + case "User.Email": + if e.complexity.User.Email == nil { + break + } + + return e.complexity.User.Email(childComplexity), true + + case "User.ID": + if e.complexity.User.ID == nil { + break + } + + return e.complexity.User.ID(childComplexity), true + + case "User.Name": + if e.complexity.User.Name == nil { + break + } + + return e.complexity.User.Name(childComplexity), true + + case "User.NotificationRules": + if e.complexity.User.NotificationRules == nil { + break + } + + return e.complexity.User.NotificationRules(childComplexity), true + + case "User.OnCallSteps": + if e.complexity.User.OnCallSteps == nil { + break + } + + return e.complexity.User.OnCallSteps(childComplexity), true + + case "User.Role": + if e.complexity.User.Role == nil { + break + } + + return e.complexity.User.Role(childComplexity), true + + case "UserConnection.Nodes": + if e.complexity.UserConnection.Nodes == nil { + break + } + + return e.complexity.UserConnection.Nodes(childComplexity), true + + case "UserConnection.PageInfo": + if e.complexity.UserConnection.PageInfo == nil { + break + } + + return e.complexity.UserConnection.PageInfo(childComplexity), true + + case "UserContactMethod.ID": + if e.complexity.UserContactMethod.ID == nil { + break + } + + return e.complexity.UserContactMethod.ID(childComplexity), true + + case "UserContactMethod.Name": + if e.complexity.UserContactMethod.Name == nil { + break + } + + return e.complexity.UserContactMethod.Name(childComplexity), true + + case "UserContactMethod.Type": + if e.complexity.UserContactMethod.Type == nil { + break + } + + return e.complexity.UserContactMethod.Type(childComplexity), true + + case "UserContactMethod.Value": + if 
e.complexity.UserContactMethod.Value == nil { + break + } + + return e.complexity.UserContactMethod.Value(childComplexity), true + + case "UserNotificationRule.ContactMethod": + if e.complexity.UserNotificationRule.ContactMethod == nil { + break + } + + return e.complexity.UserNotificationRule.ContactMethod(childComplexity), true + + case "UserNotificationRule.ContactMethodID": + if e.complexity.UserNotificationRule.ContactMethodID == nil { + break + } + + return e.complexity.UserNotificationRule.ContactMethodID(childComplexity), true + + case "UserNotificationRule.DelayMinutes": + if e.complexity.UserNotificationRule.DelayMinutes == nil { + break + } + + return e.complexity.UserNotificationRule.DelayMinutes(childComplexity), true + + case "UserNotificationRule.ID": + if e.complexity.UserNotificationRule.ID == nil { + break + } + + return e.complexity.UserNotificationRule.ID(childComplexity), true + + case "UserOverride.AddUser": + if e.complexity.UserOverride.AddUser == nil { + break + } + + return e.complexity.UserOverride.AddUser(childComplexity), true + + case "UserOverride.AddUserID": + if e.complexity.UserOverride.AddUserID == nil { + break + } + + return e.complexity.UserOverride.AddUserID(childComplexity), true + + case "UserOverride.End": + if e.complexity.UserOverride.End == nil { + break + } + + return e.complexity.UserOverride.End(childComplexity), true + + case "UserOverride.ID": + if e.complexity.UserOverride.ID == nil { + break + } + + return e.complexity.UserOverride.ID(childComplexity), true + + case "UserOverride.RemoveUser": + if e.complexity.UserOverride.RemoveUser == nil { + break + } + + return e.complexity.UserOverride.RemoveUser(childComplexity), true + + case "UserOverride.RemoveUserID": + if e.complexity.UserOverride.RemoveUserID == nil { + break + } + + return e.complexity.UserOverride.RemoveUserID(childComplexity), true + + case "UserOverride.Start": + if e.complexity.UserOverride.Start == nil { + break + } + + return 
e.complexity.UserOverride.Start(childComplexity), true + + case "UserOverride.Target": + if e.complexity.UserOverride.Target == nil { + break + } + + return e.complexity.UserOverride.Target(childComplexity), true + + case "UserOverrideConnection.Nodes": + if e.complexity.UserOverrideConnection.Nodes == nil { + break + } + + return e.complexity.UserOverrideConnection.Nodes(childComplexity), true + + case "UserOverrideConnection.PageInfo": + if e.complexity.UserOverrideConnection.PageInfo == nil { + break + } + + return e.complexity.UserOverrideConnection.PageInfo(childComplexity), true + + } + return 0, false +} + +func (e *executableSchema) Query(ctx context.Context, op *ast.OperationDefinition) *graphql.Response { + ec := executionContext{graphql.GetRequestContext(ctx), e} + + buf := ec.RequestMiddleware(ctx, func(ctx context.Context) []byte { + data := ec._Query(ctx, op.SelectionSet) + var buf bytes.Buffer + data.MarshalGQL(&buf) + return buf.Bytes() + }) + + return &graphql.Response{ + Data: buf, + Errors: ec.Errors, + Extensions: ec.Extensions, + } +} + +func (e *executableSchema) Mutation(ctx context.Context, op *ast.OperationDefinition) *graphql.Response { + ec := executionContext{graphql.GetRequestContext(ctx), e} + + buf := ec.RequestMiddleware(ctx, func(ctx context.Context) []byte { + data := ec._Mutation(ctx, op.SelectionSet) + var buf bytes.Buffer + data.MarshalGQL(&buf) + return buf.Bytes() + }) + + return &graphql.Response{ + Data: buf, + Errors: ec.Errors, + Extensions: ec.Extensions, + } +} + +func (e *executableSchema) Subscription(ctx context.Context, op *ast.OperationDefinition) func() *graphql.Response { + return graphql.OneShot(graphql.ErrorResponse(ctx, "subscriptions are not supported")) +} + +type executionContext struct { + *graphql.RequestContext + *executableSchema +} + +func (ec *executionContext) FieldMiddleware(ctx context.Context, obj interface{}, next graphql.Resolver) (ret interface{}) { + defer func() { + if r := recover(); r != nil 
{ + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + res, err := ec.ResolverMiddleware(ctx, next) + if err != nil { + ec.Error(ctx, err) + return nil + } + return res +} + +func (ec *executionContext) introspectSchema() (*introspection.Schema, error) { + if ec.DisableIntrospection { + return nil, errors.New("introspection disabled") + } + return introspection.WrapSchema(parsedSchema), nil +} + +func (ec *executionContext) introspectType(name string) (*introspection.Type, error) { + if ec.DisableIntrospection { + return nil, errors.New("introspection disabled") + } + return introspection.WrapTypeFromDef(parsedSchema, parsedSchema.Types[name]), nil +} + +var parsedSchema = gqlparser.MustLoadSchema( + &ast.Source{Name: "./schema.graphql", Input: `type Query { + # Returns the user with the given ID. If no ID is specified, + # the current user is implied. + user(id: ID): User + + # Returns a list of users who's name or email match search string. + users( + input: UserSearchOptions + first: Int = 15 + after: String = "" + search: String = "" + ): UserConnection! + + # Returns a single alert with the given ID. + alert(id: Int!): Alert + + # Returns a paginated list of alerts. + alerts(input: AlertSearchOptions): AlertConnection! + + # Returns a single service with the given ID. + service(id: ID!): Service + + # Returns a single integration key with the given ID. + integrationKey(id: ID!): IntegrationKey + + # Returns a paginated list of services. + services(input: ServiceSearchOptions): ServiceConnection! + + # Returns a single rotation with the given ID. + rotation(id: ID!): Rotation + + # Returns a paginated list of rotations. + rotations(input: RotationSearchOptions): RotationConnection! + + # Returns a single schedule with the given ID. + schedule(id: ID!): Schedule + + # Returns a paginated list of schedules. + schedules(input: ScheduleSearchOptions): ScheduleConnection! + + # Returns a single escalation policy with the given ID. 
+ escalationPolicy(id: ID!): EscalationPolicy + + # Returns a paginated list of escalation policies. + escalationPolicies( + input: EscalationPolicySearchOptions + ): EscalationPolicyConnection! + + # Returns the list of auth subjects for the given provider ID. + authSubjectsForProvider( + first: Int = 15 + after: String = "" + providerID: ID! + ): AuthSubjectConnection! + + # Returns a paginated list of time zones. + timeZones(input: TimeZoneSearchOptions): TimeZoneConnection! + + # Allows searching for assigned labels. + labels(input: LabelSearchOptions): LabelConnection! + + # Allows searching for user overrides. + userOverrides(input: UserOverrideSearchOptions): UserOverrideConnection! + + # Returns a single user override with the given ID. + userOverride(id: ID!): UserOverride + + # Returns public server configuration values. If all is set to true, + # then all values are returned (must be admin). + config(all: Boolean): [ConfigValue!]! + + # Returns a contact method with the given ID. + userContactMethod(id: ID!): UserContactMethod + + # Returns the list of Slack channels available to the current user. + slackChannels(input: SlackChannelSearchOptions): SlackChannelConnection! + + # Returns a Slack channel with the given ID. + slackChannel(id: ID!): SlackChannel +} + +input SlackChannelSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +type SlackChannel { + id: ID! + name: String! +} + +type SlackChannelConnection { + nodes: [SlackChannel!]! + pageInfo: PageInfo! +} + +type ConfigValue { + id: String! + description: String! + value: String! + type: ConfigType! + password: Boolean! +} +enum ConfigType { + string + stringList + integer + boolean +} + +input UserOverrideSearchOptions { + first: Int = 15 + after: String = "" + omit: [ID!] + + scheduleID: ID # limit search to a single schedule + filterAddUserID: [ID!] # only return overrides where the provided users have been added to a schedule (add or replace types). 
+ filterRemoveUserID: [ID!] # only return overrides where the provided users have been removed from a schedule (remove or replace types). + filterAnyUserID: [ID!] # only return overrides that add/remove/replace at least one of the provided user IDs. + start: ISOTimestamp # start of the window to search for. + end: ISOTimestamp # end of the window to search for. +} + +type UserOverrideConnection { + nodes: [UserOverride!]! + pageInfo: PageInfo! +} +type UserOverride { + id: ID! + + start: ISOTimestamp! + end: ISOTimestamp! + + addUserID: ID! + removeUserID: ID! + + addUser: User + removeUser: User + + target: Target! +} +input LabelSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + uniqueKeys: Boolean = false + omit: [ID!] +} + +type LabelConnection { + nodes: [Label!]! + pageInfo: PageInfo! +} + +type Mutation { + addAuthSubject(input: AuthSubjectInput!): Boolean! + deleteAuthSubject(input: AuthSubjectInput!): Boolean! + updateUser(input: UpdateUserInput!): Boolean! + + testContactMethod(id: ID!): Boolean! + + # Updates the status for multiple alerts given the list of alertIDs and the status they want to be updated to. + updateAlerts(input: UpdateAlertsInput!): [Alert!] + + # Updates the fields for a rotation given the rotationID, also updates ordering of and number of users for the rotation. + updateRotation(input: UpdateRotationInput!): Boolean! + + # Escalates multiple alerts given the list of alertIDs. + escalateAlerts(input: [Int!]): [Alert!] + + # Updates the favorite status of a target. + setFavorite(input: SetFavoriteInput!): Boolean! + + updateService(input: UpdateServiceInput!): Boolean! + updateEscalationPolicy(input: UpdateEscalationPolicyInput!): Boolean! + updateEscalationPolicyStep(input: UpdateEscalationPolicyStepInput!): Boolean! + + deleteAll(input: [TargetInput!]): Boolean! 
+ + createService(input: CreateServiceInput!): Service + createEscalationPolicy(input: CreateEscalationPolicyInput!): EscalationPolicy + createEscalationPolicyStep( + input: CreateEscalationPolicyStepInput! + ): EscalationPolicyStep + createRotation(input: CreateRotationInput!): Rotation + + createIntegrationKey(input: CreateIntegrationKeyInput!): IntegrationKey + + setLabel(input: SetLabelInput!): Boolean! + + createSchedule(input: CreateScheduleInput!): Schedule + updateScheduleTarget(input: ScheduleTargetInput!): Boolean! + createUserOverride(input: CreateUserOverrideInput!): UserOverride + + createUserContactMethod( + input: CreateUserContactMethodInput! + ): UserContactMethod + createUserNotificationRule( + input: CreateUserNotificationRuleInput! + ): UserNotificationRule + updateUserContactMethod(input: UpdateUserContactMethodInput!): Boolean! + + updateSchedule(input: UpdateScheduleInput!): Boolean! + updateUserOverride(input: UpdateUserOverrideInput!): Boolean! + + setConfig(input: [ConfigValueInput!]): Boolean! +} + +input ConfigValueInput { + id: String! + value: String! +} + +input UpdateUserOverrideInput { + id: ID! + + start: ISOTimestamp + end: ISOTimestamp + + addUserID: ID + removeUserID: ID +} + +input CreateUserOverrideInput { + scheduleID: ID! + + start: ISOTimestamp! + end: ISOTimestamp! + + addUserID: ID + removeUserID: ID +} + +input CreateScheduleInput { + name: String! + description: String + timeZone: String! + + targets: [ScheduleTargetInput!] +} + +input ScheduleTargetInput { + scheduleID: ID + target: TargetInput + newRotation: CreateRotationInput + rules: [ScheduleRuleInput!]! +} + +input ScheduleRuleInput { + id: ID + start: ClockTime + end: ClockTime + + # weekdayFilter is a 7-item array that indicates if the rule + # is active on each weekday, starting with Sunday. + weekdayFilter: [Boolean!] +} + +input SetLabelInput { + target: TargetInput + key: String! + + # If value is empty, the label is removed. + value: String! 
+} + +input TimeZoneSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +type TimeZoneConnection { + nodes: [TimeZone!]! + pageInfo: PageInfo! +} + +type TimeZone { + id: String! +} + +input CreateServiceInput { + name: String! + description: String = "" + + escalationPolicyID: ID + newEscalationPolicy: CreateEscalationPolicyInput + newIntegrationKeys: [CreateIntegrationKeyInput!] + labels: [SetLabelInput!] +} + +input CreateEscalationPolicyInput { + name: String! + description: String = "" + repeat: Int = 3 + + steps: [CreateEscalationPolicyStepInput!] +} + +input CreateEscalationPolicyStepInput { + escalationPolicyID: ID + + delayMinutes: Int! + + targets: [TargetInput!] + newRotation: CreateRotationInput + newSchedule: CreateScheduleInput +} + +type EscalationPolicyStep { + id: ID! + stepNumber: Int! + delayMinutes: Int! + targets: [Target!]! + escalationPolicy: EscalationPolicy +} + +input UpdateScheduleInput { + id: ID! + name: String + description: String + timeZone: String +} + +input UpdateServiceInput { + id: ID! + name: String + description: String + escalationPolicyID: ID +} + +input UpdateEscalationPolicyInput { + id: ID! + name: String + description: String + repeat: Int + stepIDs: [String!] +} + +input UpdateEscalationPolicyStepInput { + id: ID! + delayMinutes: Int + targets: [TargetInput!] +} + +input SetFavoriteInput { + target: TargetInput! + favorite: Boolean! +} + +type EscalationPolicyConnection { + nodes: [EscalationPolicy!]! + pageInfo: PageInfo! +} + +type AlertConnection { + nodes: [Alert!]! + pageInfo: PageInfo! +} + +type ScheduleConnection { + nodes: [Schedule!]! + pageInfo: PageInfo! +} + +type Schedule { + id: ID! + name: String! + description: String! + timeZone: String! + + assignedTo: [Target!]! + shifts(start: ISOTimestamp!, end: ISOTimestamp!): [OnCallShift!]! + + targets: [ScheduleTarget!]! + target(input: TargetInput!): ScheduleTarget +} + +type OnCallShift { + userID: ID! 
+ user: User + start: ISOTimestamp! + end: ISOTimestamp! + truncated: Boolean! +} + +type ScheduleTarget { + scheduleID: ID! + target: Target! + rules: [ScheduleRule!]! +} + +type ScheduleRule { + id: ID! + scheduleID: ID! + + start: ClockTime! + end: ClockTime! + + # weekdayFilter is a 7-item array that indicates if the rule + # is active on each weekday, starting with Sunday. + weekdayFilter: [Boolean!]! + + target: Target! +} + +type RotationConnection { + nodes: [Rotation!]! + pageInfo: PageInfo! +} + +input CreateRotationInput { + name: String! + description: String + + timeZone: String! + start: ISOTimestamp! + + type: RotationType! + shiftLength: Int = 1 + + userIDs: [ID!] +} + +type Rotation { + id: ID! + name: String! + description: String! + + start: ISOTimestamp! + timeZone: String! + + type: RotationType! + shiftLength: Int! + + activeUserIndex: Int! + + userIDs: [ID!]! + users: [User!]! + + nextHandoffTimes(num: Int): [ISOTimestamp!]! +} + +enum RotationType { + weekly + daily + hourly +} + +input UpdateAlertsInput { + # List of alertIDs. + alertIDs: [Int!]! + + newStatus: AlertStatus! +} + +input UpdateRotationInput { + id: ID! + + name: String + description: String + timeZone: String + start: ISOTimestamp + type: RotationType + shiftLength: Int + + activeUserIndex: Int + + # activeUserIndex will not be changed, as the index will remain the same. + # On call user may change since whatever index is put into activeUserIndex will be on call. + userIDs: [ID!] +} + +input RotationSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +input EscalationPolicySearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +input ScheduleSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +input ServiceSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] + + # Include only favorited services in the results. 
+ favoritesOnly: Boolean = false + + # Sort favorite services first. + favoritesFirst: Boolean = false +} + +input UserSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +input AlertSearchOptions { + filterByStatus: [AlertStatus!] + filterByServiceID: [ID!] + search: String = "" + first: Int = 15 + after: String = "" + favoritesOnly: Boolean = false + omit: [Int!] +} + +# An ISOTimestamp is an RFC3339-formatted timestamp string. +scalar ISOTimestamp + +# ClockTime is a 24-hour time in the format 00:00 +scalar ClockTime + +type Alert { + id: ID! + alertID: Int! + status: AlertStatus! + summary: String! + details: String! + createdAt: ISOTimestamp! + serviceID: ID! + service: Service + + # Escalation Policy State for the alert. + state: AlertState +} + +# The escalation policy state details for the alert. +type AlertState { + lastEscalation: ISOTimestamp! + stepNumber: Int! + repeatCount: Int! +} + +type Service { + id: ID! + name: String! + description: String! + escalationPolicyID: ID! + escalationPolicy: EscalationPolicy + isFavorite: Boolean! + + onCallUsers: [ServiceOnCallUser!]! + integrationKeys: [IntegrationKey!]! + labels: [Label!]! +} + +input CreateIntegrationKeyInput { + serviceID: ID + type: IntegrationKeyType! + name: String! +} + +type Label { + key: String! + value: String! +} + +type IntegrationKey { + id: ID! + serviceID: ID! + type: IntegrationKeyType! + name: String! + href: String! +} + +enum IntegrationKeyType { + generic + grafana + email +} + +type ServiceOnCallUser { + userID: ID! + userName: String! + stepNumber: Int! +} + +type EscalationPolicy { + id: ID! + name: String! + description: String! + repeat: Int! + + assignedTo: [Target!]! + steps: [EscalationPolicyStep!]! +} + +# Different Alert Status. +enum AlertStatus { + StatusAcknowledged + StatusClosed + StatusUnacknowledged +} + +type Target { + id: ID! + type: TargetType! + name: String +} + +input TargetInput { + id: ID! + type: TargetType! 
+} + +enum TargetType { + escalationPolicy + notificationChannel + slackChannel + notificationPolicy + rotation + service + schedule + user + integrationKey + userOverride + notificationRule + contactMethod +} + +type ServiceConnection { + nodes: [Service!]! + pageInfo: PageInfo! +} + +type UserConnection { + nodes: [User!]! + pageInfo: PageInfo! +} + +type AuthSubjectConnection { + nodes: [AuthSubject!]! + pageInfo: PageInfo! +} + +type PageInfo { + endCursor: String + hasNextPage: Boolean! +} + +input UpdateUserInput { + id: ID! + name: String + email: String + role: UserRole + + statusUpdateContactMethodID: ID +} + +input AuthSubjectInput { + userID: ID! + providerID: ID! + subjectID: ID! +} + +enum UserRole { + unknown + user + admin +} + +type User { + id: ID! + + role: UserRole! + + # The user's configured name. + name: String! + + # Email of the user. + email: String! + + contactMethods: [UserContactMethod!]! + notificationRules: [UserNotificationRule!]! + + statusUpdateContactMethodID: ID! + + authSubjects: [AuthSubject!]! + + onCallSteps: [EscalationPolicyStep!]! +} + +type UserNotificationRule { + id: ID! + delayMinutes: Int! + + contactMethodID: ID! + contactMethod: UserContactMethod +} + +enum ContactMethodType { + SMS + VOICE +} + +# A method of contacting a user. +type UserContactMethod { + id: ID! + + type: ContactMethodType + + # User-defined label for this contact method. + name: String! + + value: String! +} + +input CreateUserContactMethodInput { + userID: ID! + + type: ContactMethodType! + name: String! + value: String! + newUserNotificationRule: CreateUserNotificationRuleInput +} + +input CreateUserNotificationRuleInput { + userID: ID + contactMethodID: ID + delayMinutes: Int! +} + +input UpdateUserContactMethodInput { + id: ID! + + name: String + value: String +} + +type AuthSubject { + providerID: ID! + subjectID: ID! + userID: ID! 
+} +`}, +) + +// endregion ************************** generated!.gotpl ************************** + +// region ***************************** args.gotpl ***************************** + +func (ec *executionContext) field_Mutation_addAuthSubject_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 user.AuthSubject + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNAuthSubjectInput2githubᚗcomᚋtargetᚋgoalertᚋuserᚐAuthSubject(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_createEscalationPolicyStep_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 CreateEscalationPolicyStepInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNCreateEscalationPolicyStepInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateEscalationPolicyStepInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_createEscalationPolicy_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 CreateEscalationPolicyInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNCreateEscalationPolicyInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateEscalationPolicyInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_createIntegrationKey_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 CreateIntegrationKeyInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = 
ec.unmarshalNCreateIntegrationKeyInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateIntegrationKeyInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_createRotation_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 CreateRotationInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNCreateRotationInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateRotationInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_createSchedule_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 CreateScheduleInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNCreateScheduleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateScheduleInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_createService_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 CreateServiceInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNCreateServiceInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateServiceInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_createUserContactMethod_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 CreateUserContactMethodInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = 
ec.unmarshalNCreateUserContactMethodInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateUserContactMethodInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_createUserNotificationRule_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 CreateUserNotificationRuleInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNCreateUserNotificationRuleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateUserNotificationRuleInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_createUserOverride_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 CreateUserOverrideInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNCreateUserOverrideInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateUserOverrideInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_deleteAll_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 []assignment.RawTarget + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalOTargetInput2ᚕgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_deleteAuthSubject_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 user.AuthSubject + if tmp, ok := rawArgs["input"]; ok { + arg0, err = 
ec.unmarshalNAuthSubjectInput2githubᚗcomᚋtargetᚋgoalertᚋuserᚐAuthSubject(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_escalateAlerts_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 []int + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalOInt2ᚕint(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_setConfig_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 []ConfigValueInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalOConfigValueInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐConfigValueInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_setFavorite_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 SetFavoriteInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNSetFavoriteInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐSetFavoriteInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_setLabel_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 SetLabelInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNSetLabelInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐSetLabelInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) 
field_Mutation_testContactMethod_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["id"]; ok { + arg0, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateAlerts_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 UpdateAlertsInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNUpdateAlertsInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateAlertsInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateEscalationPolicyStep_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 UpdateEscalationPolicyStepInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNUpdateEscalationPolicyStepInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateEscalationPolicyStepInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateEscalationPolicy_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 UpdateEscalationPolicyInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNUpdateEscalationPolicyInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateEscalationPolicyInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateRotation_args(ctx context.Context, rawArgs map[string]interface{}) 
(map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 UpdateRotationInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNUpdateRotationInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateRotationInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateScheduleTarget_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 ScheduleTargetInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNScheduleTargetInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTargetInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateSchedule_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 UpdateScheduleInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNUpdateScheduleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateScheduleInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateService_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 UpdateServiceInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNUpdateServiceInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateServiceInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateUserContactMethod_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + 
var arg0 UpdateUserContactMethodInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNUpdateUserContactMethodInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateUserContactMethodInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateUserOverride_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 UpdateUserOverrideInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNUpdateUserOverrideInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateUserOverrideInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Mutation_updateUser_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 UpdateUserInput + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNUpdateUserInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateUserInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["name"]; ok { + arg0, err = ec.unmarshalNString2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["name"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_alert_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 int + if tmp, ok := rawArgs["id"]; ok { + arg0, err = ec.unmarshalNInt2int(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return 
args, nil +} + +func (ec *executionContext) field_Query_alerts_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *AlertSearchOptions + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalOAlertSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐAlertSearchOptions(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_authSubjectsForProvider_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *int + if tmp, ok := rawArgs["first"]; ok { + arg0, err = ec.unmarshalOInt2ᚖint(ctx, tmp) + if err != nil { + return nil, err + } + } + args["first"] = arg0 + var arg1 *string + if tmp, ok := rawArgs["after"]; ok { + arg1, err = ec.unmarshalOString2ᚖstring(ctx, tmp) + if err != nil { + return nil, err + } + } + args["after"] = arg1 + var arg2 string + if tmp, ok := rawArgs["providerID"]; ok { + arg2, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["providerID"] = arg2 + return args, nil +} + +func (ec *executionContext) field_Query_config_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *bool + if tmp, ok := rawArgs["all"]; ok { + arg0, err = ec.unmarshalOBoolean2ᚖbool(ctx, tmp) + if err != nil { + return nil, err + } + } + args["all"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_escalationPolicies_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *EscalationPolicySearchOptions + if tmp, ok := rawArgs["input"]; ok { + arg0, err = 
ec.unmarshalOEscalationPolicySearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐEscalationPolicySearchOptions(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_escalationPolicy_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["id"]; ok { + arg0, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_integrationKey_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["id"]; ok { + arg0, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_labels_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *LabelSearchOptions + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalOLabelSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐLabelSearchOptions(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_rotation_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["id"]; ok { + arg0, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_rotations_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + 
args := map[string]interface{}{} + var arg0 *RotationSearchOptions + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalORotationSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐRotationSearchOptions(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_schedule_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["id"]; ok { + arg0, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_schedules_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *ScheduleSearchOptions + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalOScheduleSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleSearchOptions(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_service_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["id"]; ok { + arg0, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_services_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *ServiceSearchOptions + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalOServiceSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐServiceSearchOptions(ctx, tmp) + if err != nil { + return nil, err + } + } + 
args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_slackChannel_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["id"]; ok { + arg0, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_slackChannels_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *SlackChannelSearchOptions + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalOSlackChannelSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐSlackChannelSearchOptions(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_timeZones_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *TimeZoneSearchOptions + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalOTimeZoneSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐTimeZoneSearchOptions(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_userContactMethod_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["id"]; ok { + arg0, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_userOverride_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var 
arg0 string + if tmp, ok := rawArgs["id"]; ok { + arg0, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_userOverrides_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *UserOverrideSearchOptions + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalOUserOverrideSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐUserOverrideSearchOptions(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_user_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *string + if tmp, ok := rawArgs["id"]; ok { + arg0, err = ec.unmarshalOID2ᚖstring(ctx, tmp) + if err != nil { + return nil, err + } + } + args["id"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Query_users_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *UserSearchOptions + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalOUserSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐUserSearchOptions(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + var arg1 *int + if tmp, ok := rawArgs["first"]; ok { + arg1, err = ec.unmarshalOInt2ᚖint(ctx, tmp) + if err != nil { + return nil, err + } + } + args["first"] = arg1 + var arg2 *string + if tmp, ok := rawArgs["after"]; ok { + arg2, err = ec.unmarshalOString2ᚖstring(ctx, tmp) + if err != nil { + return nil, err + } + } + args["after"] = arg2 + var arg3 *string + if tmp, ok := rawArgs["search"]; ok { + arg3, err = ec.unmarshalOString2ᚖstring(ctx, tmp) + if err != nil { + return nil, err + } + } + 
args["search"] = arg3 + return args, nil +} + +func (ec *executionContext) field_Rotation_nextHandoffTimes_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *int + if tmp, ok := rawArgs["num"]; ok { + arg0, err = ec.unmarshalOInt2ᚖint(ctx, tmp) + if err != nil { + return nil, err + } + } + args["num"] = arg0 + return args, nil +} + +func (ec *executionContext) field_Schedule_shifts_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 time.Time + if tmp, ok := rawArgs["start"]; ok { + arg0, err = ec.unmarshalNISOTimestamp2timeᚐTime(ctx, tmp) + if err != nil { + return nil, err + } + } + args["start"] = arg0 + var arg1 time.Time + if tmp, ok := rawArgs["end"]; ok { + arg1, err = ec.unmarshalNISOTimestamp2timeᚐTime(ctx, tmp) + if err != nil { + return nil, err + } + } + args["end"] = arg1 + return args, nil +} + +func (ec *executionContext) field_Schedule_target_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 assignment.RawTarget + if tmp, ok := rawArgs["input"]; ok { + arg0, err = ec.unmarshalNTargetInput2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, tmp) + if err != nil { + return nil, err + } + } + args["input"] = arg0 + return args, nil +} + +func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 bool + if tmp, ok := rawArgs["includeDeprecated"]; ok { + arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp) + if err != nil { + return nil, err + } + } + args["includeDeprecated"] = arg0 + return args, nil +} + +func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs 
map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 bool + if tmp, ok := rawArgs["includeDeprecated"]; ok { + arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp) + if err != nil { + return nil, err + } + } + args["includeDeprecated"] = arg0 + return args, nil +} + +// endregion ***************************** args.gotpl ***************************** + +// region **************************** field.gotpl ***************************** + +func (ec *executionContext) _Alert_id(ctx context.Context, field graphql.CollectedField, obj *alert.Alert) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Alert", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Alert_alertID(ctx context.Context, field graphql.CollectedField, obj *alert.Alert) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Alert", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return 
ec.resolvers.Alert().AlertID(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Alert_status(ctx context.Context, field graphql.CollectedField, obj *alert.Alert) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Alert", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Alert().Status(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(AlertStatus) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNAlertStatus2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐAlertStatus(ctx, field.Selections, res) +} + +func (ec *executionContext) _Alert_summary(ctx context.Context, field graphql.CollectedField, obj *alert.Alert) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Alert", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Summary, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be 
null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Alert_details(ctx context.Context, field graphql.CollectedField, obj *alert.Alert) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Alert", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Details, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Alert_createdAt(ctx context.Context, field graphql.CollectedField, obj *alert.Alert) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Alert", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.CreatedAt, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(time.Time) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNISOTimestamp2timeᚐTime(ctx, 
field.Selections, res) +} + +func (ec *executionContext) _Alert_serviceID(ctx context.Context, field graphql.CollectedField, obj *alert.Alert) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Alert", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ServiceID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Alert_service(ctx context.Context, field graphql.CollectedField, obj *alert.Alert) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Alert", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Alert().Service(rctx, obj) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*service.Service) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOService2ᚖgithub.comᚋtargetᚋgoalertᚋserviceᚐService(ctx, field.Selections, res) +} + +func (ec *executionContext) _Alert_state(ctx context.Context, field graphql.CollectedField, obj *alert.Alert) graphql.Marshaler { + ctx = 
ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Alert", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Alert().State(rctx, obj) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*alert.State) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOAlertState2ᚖgithub.comᚋtargetᚋgoalertᚋalertᚐState(ctx, field.Selections, res) +} + +func (ec *executionContext) _AlertConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *AlertConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "AlertConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Nodes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]alert.Alert) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNAlert2ᚕgithub.comᚋtargetᚋgoalertᚋalertᚐAlert(ctx, field.Selections, res) +} + +func (ec *executionContext) _AlertConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *AlertConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := 
&graphql.ResolverContext{ + Object: "AlertConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(PageInfo) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) _AlertState_lastEscalation(ctx context.Context, field graphql.CollectedField, obj *alert.State) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "AlertState", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.LastEscalation, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(time.Time) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNISOTimestamp2timeᚐTime(ctx, field.Selections, res) +} + +func (ec *executionContext) _AlertState_stepNumber(ctx context.Context, field graphql.CollectedField, obj *alert.State) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "AlertState", + Field: field, + Args: nil, + IsMethod: 
false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.StepNumber, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _AlertState_repeatCount(ctx context.Context, field graphql.CollectedField, obj *alert.State) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "AlertState", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.RepeatCount, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _AuthSubject_providerID(ctx context.Context, field graphql.CollectedField, obj *user.AuthSubject) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "AuthSubject", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, 
func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ProviderID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _AuthSubject_subjectID(ctx context.Context, field graphql.CollectedField, obj *user.AuthSubject) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "AuthSubject", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.SubjectID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _AuthSubject_userID(ctx context.Context, field graphql.CollectedField, obj *user.AuthSubject) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "AuthSubject", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.UserID, nil + }) + if resTmp 
== nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _AuthSubjectConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *AuthSubjectConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "AuthSubjectConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Nodes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]user.AuthSubject) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNAuthSubject2ᚕgithub.comᚋtargetᚋgoalertᚋuserᚐAuthSubject(ctx, field.Selections, res) +} + +func (ec *executionContext) _AuthSubjectConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *AuthSubjectConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "AuthSubjectConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, 
"must not be null") + } + return graphql.Null + } + res := resTmp.(PageInfo) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) _ConfigValue_id(ctx context.Context, field graphql.CollectedField, obj *ConfigValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ConfigValue", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _ConfigValue_description(ctx context.Context, field graphql.CollectedField, obj *ConfigValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ConfigValue", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = 
ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _ConfigValue_value(ctx context.Context, field graphql.CollectedField, obj *ConfigValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ConfigValue", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Value, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _ConfigValue_type(ctx context.Context, field graphql.CollectedField, obj *ConfigValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ConfigValue", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Type, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(ConfigType) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNConfigType2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐConfigType(ctx, field.Selections, res) +} + +func (ec 
*executionContext) _ConfigValue_password(ctx context.Context, field graphql.CollectedField, obj *ConfigValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ConfigValue", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Password, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicy_id(ctx context.Context, field graphql.CollectedField, obj *escalation.Policy) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "EscalationPolicy", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicy_name(ctx context.Context, field graphql.CollectedField, obj *escalation.Policy) graphql.Marshaler { + ctx = 
ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "EscalationPolicy", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicy_description(ctx context.Context, field graphql.CollectedField, obj *escalation.Policy) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "EscalationPolicy", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicy_repeat(ctx context.Context, field graphql.CollectedField, obj *escalation.Policy) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := 
&graphql.ResolverContext{ + Object: "EscalationPolicy", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Repeat, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicy_assignedTo(ctx context.Context, field graphql.CollectedField, obj *escalation.Policy) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "EscalationPolicy", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.EscalationPolicy().AssignedTo(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]assignment.RawTarget) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNTarget2ᚕgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicy_steps(ctx context.Context, field graphql.CollectedField, obj *escalation.Policy) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: 
"EscalationPolicy", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.EscalationPolicy().Steps(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]escalation.Step) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNEscalationPolicyStep2ᚕgithub.comᚋtargetᚋgoalertᚋescalationᚐStep(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicyConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *EscalationPolicyConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "EscalationPolicyConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Nodes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]escalation.Policy) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNEscalationPolicy2ᚕgithub.comᚋtargetᚋgoalertᚋescalationᚐPolicy(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicyConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *EscalationPolicyConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { 
ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "EscalationPolicyConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(PageInfo) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicyStep_id(ctx context.Context, field graphql.CollectedField, obj *escalation.Step) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "EscalationPolicyStep", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicyStep_stepNumber(ctx context.Context, field graphql.CollectedField, obj *escalation.Step) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: 
"EscalationPolicyStep", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.StepNumber, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicyStep_delayMinutes(ctx context.Context, field graphql.CollectedField, obj *escalation.Step) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "EscalationPolicyStep", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DelayMinutes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicyStep_targets(ctx context.Context, field graphql.CollectedField, obj *escalation.Step) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "EscalationPolicyStep", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + 
ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.EscalationPolicyStep().Targets(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]assignment.RawTarget) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNTarget2ᚕgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, field.Selections, res) +} + +func (ec *executionContext) _EscalationPolicyStep_escalationPolicy(ctx context.Context, field graphql.CollectedField, obj *escalation.Step) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "EscalationPolicyStep", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.EscalationPolicyStep().EscalationPolicy(rctx, obj) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*escalation.Policy) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOEscalationPolicy2ᚖgithub.comᚋtargetᚋgoalertᚋescalationᚐPolicy(ctx, field.Selections, res) +} + +func (ec *executionContext) _IntegrationKey_id(ctx context.Context, field graphql.CollectedField, obj *integrationkey.IntegrationKey) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "IntegrationKey", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = 
graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _IntegrationKey_serviceID(ctx context.Context, field graphql.CollectedField, obj *integrationkey.IntegrationKey) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "IntegrationKey", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ServiceID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _IntegrationKey_type(ctx context.Context, field graphql.CollectedField, obj *integrationkey.IntegrationKey) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "IntegrationKey", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := 
ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.IntegrationKey().Type(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(IntegrationKeyType) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNIntegrationKeyType2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐIntegrationKeyType(ctx, field.Selections, res) +} + +func (ec *executionContext) _IntegrationKey_name(ctx context.Context, field graphql.CollectedField, obj *integrationkey.IntegrationKey) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "IntegrationKey", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _IntegrationKey_href(ctx context.Context, field graphql.CollectedField, obj *integrationkey.IntegrationKey) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "IntegrationKey", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, 
obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.IntegrationKey().Href(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Label_key(ctx context.Context, field graphql.CollectedField, obj *label.Label) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Label", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Key, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Label_value(ctx context.Context, field graphql.CollectedField, obj *label.Label) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Label", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Value, nil + }) + if resTmp == nil { + 
if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _LabelConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *LabelConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "LabelConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Nodes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]label.Label) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNLabel2ᚕgithub.comᚋtargetᚋgoalertᚋlabelᚐLabel(ctx, field.Selections, res) +} + +func (ec *executionContext) _LabelConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *LabelConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "LabelConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := 
resTmp.(PageInfo) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_addAuthSubject(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_addAuthSubject_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().AddAuthSubject(rctx, args["input"].(user.AuthSubject)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_deleteAuthSubject(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_deleteAuthSubject_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp 
:= ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().DeleteAuthSubject(rctx, args["input"].(user.AuthSubject)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_updateUser(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_updateUser_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().UpdateUser(rctx, args["input"].(UpdateUserInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_testContactMethod(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx 
= graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_testContactMethod_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().TestContactMethod(rctx, args["id"].(string)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_updateAlerts(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_updateAlerts_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().UpdateAlerts(rctx, args["input"].(UpdateAlertsInput)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]alert.Alert) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOAlert2ᚕgithub.comᚋtargetᚋgoalertᚋalertᚐAlert(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_updateRotation(ctx 
context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_updateRotation_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().UpdateRotation(rctx, args["input"].(UpdateRotationInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_escalateAlerts(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_escalateAlerts_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().EscalateAlerts(rctx, args["input"].([]int)) + }) + if resTmp == nil { + 
return graphql.Null + } + res := resTmp.([]alert.Alert) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOAlert2ᚕgithub.comᚋtargetᚋgoalertᚋalertᚐAlert(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_setFavorite(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_setFavorite_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().SetFavorite(rctx, args["input"].(SetFavoriteInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_updateService(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_updateService_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = 
ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().UpdateService(rctx, args["input"].(UpdateServiceInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_updateEscalationPolicy(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_updateEscalationPolicy_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().UpdateEscalationPolicy(rctx, args["input"].(UpdateEscalationPolicyInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_updateEscalationPolicyStep(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + 
rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_updateEscalationPolicyStep_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().UpdateEscalationPolicyStep(rctx, args["input"].(UpdateEscalationPolicyStepInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_deleteAll(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_deleteAll_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().DeleteAll(rctx, args["input"].([]assignment.RawTarget)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + 
ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_createService(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_createService_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().CreateService(rctx, args["input"].(CreateServiceInput)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*service.Service) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOService2ᚖgithub.comᚋtargetᚋgoalertᚋserviceᚐService(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_createEscalationPolicy(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_createEscalationPolicy_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + 
ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().CreateEscalationPolicy(rctx, args["input"].(CreateEscalationPolicyInput)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*escalation.Policy) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOEscalationPolicy2ᚖgithub.comᚋtargetᚋgoalertᚋescalationᚐPolicy(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_createEscalationPolicyStep(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_createEscalationPolicyStep_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().CreateEscalationPolicyStep(rctx, args["input"].(CreateEscalationPolicyStepInput)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*escalation.Step) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOEscalationPolicyStep2ᚖgithub.comᚋtargetᚋgoalertᚋescalationᚐStep(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_createRotation(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = 
graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_createRotation_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().CreateRotation(rctx, args["input"].(CreateRotationInput)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*rotation.Rotation) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalORotation2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚋrotationᚐRotation(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_createIntegrationKey(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_createIntegrationKey_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().CreateIntegrationKey(rctx, args["input"].(CreateIntegrationKeyInput)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*integrationkey.IntegrationKey) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOIntegrationKey2ᚖgithub.comᚋtargetᚋgoalertᚋintegrationkeyᚐIntegrationKey(ctx, 
field.Selections, res) +} + +func (ec *executionContext) _Mutation_setLabel(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_setLabel_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().SetLabel(rctx, args["input"].(SetLabelInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_createSchedule(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_createSchedule_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return 
ec.resolvers.Mutation().CreateSchedule(rctx, args["input"].(CreateScheduleInput)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*schedule.Schedule) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOSchedule2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚐSchedule(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_updateScheduleTarget(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_updateScheduleTarget_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().UpdateScheduleTarget(rctx, args["input"].(ScheduleTargetInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_createUserOverride(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := 
ec.field_Mutation_createUserOverride_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().CreateUserOverride(rctx, args["input"].(CreateUserOverrideInput)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*override.UserOverride) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOUserOverride2ᚖgithub.comᚋtargetᚋgoalertᚋoverrideᚐUserOverride(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_createUserContactMethod(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_createUserContactMethod_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().CreateUserContactMethod(rctx, args["input"].(CreateUserContactMethodInput)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*contactmethod.ContactMethod) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOUserContactMethod2ᚖgithub.comᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐContactMethod(ctx, field.Selections, res) +} + +func (ec *executionContext) 
_Mutation_createUserNotificationRule(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_createUserNotificationRule_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().CreateUserNotificationRule(rctx, args["input"].(CreateUserNotificationRuleInput)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*notificationrule.NotificationRule) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOUserNotificationRule2ᚖgithub.comᚋtargetᚋgoalertᚋuserᚋnotificationruleᚐNotificationRule(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_updateUserContactMethod(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_updateUserContactMethod_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from 
middleware stack in children + return ec.resolvers.Mutation().UpdateUserContactMethod(rctx, args["input"].(UpdateUserContactMethodInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_updateSchedule(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_updateSchedule_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().UpdateSchedule(rctx, args["input"].(UpdateScheduleInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_updateUserOverride(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := 
field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_updateUserOverride_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().UpdateUserOverride(rctx, args["input"].(UpdateUserOverrideInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Mutation_setConfig(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Mutation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Mutation_setConfig_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().SetConfig(rctx, args["input"].([]ConfigValueInput)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _OnCallShift_userID(ctx context.Context, field 
graphql.CollectedField, obj *oncall.Shift) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "OnCallShift", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.UserID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _OnCallShift_user(ctx context.Context, field graphql.CollectedField, obj *oncall.Shift) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "OnCallShift", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.OnCallShift().User(rctx, obj) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*user.User) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOUser2ᚖgithub.comᚋtargetᚋgoalertᚋuserᚐUser(ctx, field.Selections, res) +} + +func (ec *executionContext) _OnCallShift_start(ctx context.Context, field graphql.CollectedField, obj *oncall.Shift) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := 
&graphql.ResolverContext{ + Object: "OnCallShift", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Start, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(time.Time) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNISOTimestamp2timeᚐTime(ctx, field.Selections, res) +} + +func (ec *executionContext) _OnCallShift_end(ctx context.Context, field graphql.CollectedField, obj *oncall.Shift) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "OnCallShift", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.End, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(time.Time) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNISOTimestamp2timeᚐTime(ctx, field.Selections, res) +} + +func (ec *executionContext) _OnCallShift_truncated(ctx context.Context, field graphql.CollectedField, obj *oncall.Shift) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "OnCallShift", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, 
rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Truncated, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _PageInfo_endCursor(ctx context.Context, field graphql.CollectedField, obj *PageInfo) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "PageInfo", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.EndCursor, nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) _PageInfo_hasNextPage(ctx context.Context, field graphql.CollectedField, obj *PageInfo) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "PageInfo", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return 
obj.HasNextPage, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_user(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_user_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().User(rctx, args["id"].(*string)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*user.User) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOUser2ᚖgithub.comᚋtargetᚋgoalertᚋuserᚐUser(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_users(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_users_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := 
ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Users(rctx, args["input"].(*UserSearchOptions), args["first"].(*int), args["after"].(*string), args["search"].(*string)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*UserConnection) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNUserConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐUserConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_alert(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_alert_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Alert(rctx, args["id"].(int)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*alert.Alert) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOAlert2ᚖgithub.comᚋtargetᚋgoalertᚋalertᚐAlert(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_alerts(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, 
+ Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_alerts_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Alerts(rctx, args["input"].(*AlertSearchOptions)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*AlertConnection) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNAlertConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐAlertConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_service(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_service_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Service(rctx, args["id"].(string)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*service.Service) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOService2ᚖgithub.comᚋtargetᚋgoalertᚋserviceᚐService(ctx, field.Selections, res) +} + 
+func (ec *executionContext) _Query_integrationKey(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_integrationKey_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().IntegrationKey(rctx, args["id"].(string)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*integrationkey.IntegrationKey) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOIntegrationKey2ᚖgithub.comᚋtargetᚋgoalertᚋintegrationkeyᚐIntegrationKey(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_services(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_services_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Services(rctx, 
args["input"].(*ServiceSearchOptions)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*ServiceConnection) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNServiceConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐServiceConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_rotation(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_rotation_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Rotation(rctx, args["id"].(string)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*rotation.Rotation) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalORotation2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚋrotationᚐRotation(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_rotations(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_rotations_args(ctx, rawArgs) + if err != nil { 
+ ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Rotations(rctx, args["input"].(*RotationSearchOptions)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*RotationConnection) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNRotationConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐRotationConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_schedule(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_schedule_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Schedule(rctx, args["id"].(string)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*schedule.Schedule) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOSchedule2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚐSchedule(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_schedules(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer 
func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_schedules_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Schedules(rctx, args["input"].(*ScheduleSearchOptions)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*ScheduleConnection) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNScheduleConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_escalationPolicy(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_escalationPolicy_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().EscalationPolicy(rctx, args["id"].(string)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*escalation.Policy) + 
rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOEscalationPolicy2ᚖgithub.comᚋtargetᚋgoalertᚋescalationᚐPolicy(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_escalationPolicies(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_escalationPolicies_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().EscalationPolicies(rctx, args["input"].(*EscalationPolicySearchOptions)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*EscalationPolicyConnection) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNEscalationPolicyConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐEscalationPolicyConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_authSubjectsForProvider(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_authSubjectsForProvider_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, 
err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().AuthSubjectsForProvider(rctx, args["first"].(*int), args["after"].(*string), args["providerID"].(string)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*AuthSubjectConnection) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNAuthSubjectConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐAuthSubjectConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_timeZones(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_timeZones_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().TimeZones(rctx, args["input"].(*TimeZoneSearchOptions)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*TimeZoneConnection) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNTimeZoneConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐTimeZoneConnection(ctx, field.Selections, res) +} + +func (ec 
*executionContext) _Query_labels(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_labels_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Labels(rctx, args["input"].(*LabelSearchOptions)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*LabelConnection) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNLabelConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐLabelConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_userOverrides(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_userOverrides_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return 
ec.resolvers.Query().UserOverrides(rctx, args["input"].(*UserOverrideSearchOptions)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*UserOverrideConnection) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNUserOverrideConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐUserOverrideConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_userOverride(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_userOverride_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().UserOverride(rctx, args["id"].(string)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*override.UserOverride) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOUserOverride2ᚖgithub.comᚋtargetᚋgoalertᚋoverrideᚐUserOverride(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_config(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + 
args, err := ec.field_Query_config_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Config(rctx, args["all"].(*bool)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]ConfigValue) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNConfigValue2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐConfigValue(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_userContactMethod(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_userContactMethod_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().UserContactMethod(rctx, args["id"].(string)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*contactmethod.ContactMethod) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOUserContactMethod2ᚖgithub.comᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐContactMethod(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_slackChannels(ctx context.Context, field 
graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_slackChannels_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().SlackChannels(rctx, args["input"].(*SlackChannelSearchOptions)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*SlackChannelConnection) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNSlackChannelConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐSlackChannelConnection(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query_slackChannel(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_slackChannel_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().SlackChannel(rctx, 
args["id"].(string)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*slack.Channel) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOSlackChannel2ᚖgithub.comᚋtargetᚋgoalertᚋnotificationᚋslackᚐChannel(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query___type_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.introspectType(args["name"].(string)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Type) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalO__Type2ᚖgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, nil, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return 
ec.introspectSchema() + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Schema) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalO__Schema2ᚖgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res) +} + +func (ec *executionContext) _Rotation_id(ctx context.Context, field graphql.CollectedField, obj *rotation.Rotation) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Rotation", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Rotation_name(ctx context.Context, field graphql.CollectedField, obj *rotation.Rotation) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Rotation", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result 
= res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Rotation_description(ctx context.Context, field graphql.CollectedField, obj *rotation.Rotation) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Rotation", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Rotation_start(ctx context.Context, field graphql.CollectedField, obj *rotation.Rotation) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Rotation", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Start, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(time.Time) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNISOTimestamp2timeᚐTime(ctx, field.Selections, res) +} + +func (ec 
*executionContext) _Rotation_timeZone(ctx context.Context, field graphql.CollectedField, obj *rotation.Rotation) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Rotation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Rotation().TimeZone(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Rotation_type(ctx context.Context, field graphql.CollectedField, obj *rotation.Rotation) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Rotation", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Type, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(rotation.Type) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNRotationType2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋrotationᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) _Rotation_shiftLength(ctx context.Context, field 
graphql.CollectedField, obj *rotation.Rotation) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Rotation", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ShiftLength, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Rotation_activeUserIndex(ctx context.Context, field graphql.CollectedField, obj *rotation.Rotation) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Rotation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Rotation().ActiveUserIndex(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Rotation_userIDs(ctx context.Context, field graphql.CollectedField, obj *rotation.Rotation) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { 
ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Rotation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Rotation().UserIDs(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2ᚕstring(ctx, field.Selections, res) +} + +func (ec *executionContext) _Rotation_users(ctx context.Context, field graphql.CollectedField, obj *rotation.Rotation) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Rotation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Rotation().Users(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]user.User) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNUser2ᚕgithub.comᚋtargetᚋgoalertᚋuserᚐUser(ctx, field.Selections, res) +} + +func (ec *executionContext) _Rotation_nextHandoffTimes(ctx context.Context, field graphql.CollectedField, obj *rotation.Rotation) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ 
+ Object: "Rotation", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Rotation_nextHandoffTimes_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Rotation().NextHandoffTimes(rctx, obj, args["num"].(*int)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]time.Time) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNISOTimestamp2ᚕtimeᚐTime(ctx, field.Selections, res) +} + +func (ec *executionContext) _RotationConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *RotationConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "RotationConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Nodes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]rotation.Rotation) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNRotation2ᚕgithub.comᚋtargetᚋgoalertᚋscheduleᚋrotationᚐRotation(ctx, field.Selections, res) +} + +func (ec *executionContext) _RotationConnection_pageInfo(ctx context.Context, field 
graphql.CollectedField, obj *RotationConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "RotationConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(PageInfo) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) _Schedule_id(ctx context.Context, field graphql.CollectedField, obj *schedule.Schedule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Schedule", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Schedule_name(ctx context.Context, field graphql.CollectedField, obj *schedule.Schedule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer 
func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Schedule", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Schedule_description(ctx context.Context, field graphql.CollectedField, obj *schedule.Schedule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Schedule", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Schedule_timeZone(ctx context.Context, field graphql.CollectedField, obj *schedule.Schedule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Schedule", + Field: field, + Args: nil, + IsMethod: true, + } + 
ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Schedule().TimeZone(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Schedule_assignedTo(ctx context.Context, field graphql.CollectedField, obj *schedule.Schedule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Schedule", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Schedule().AssignedTo(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]assignment.RawTarget) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNTarget2ᚕgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, field.Selections, res) +} + +func (ec *executionContext) _Schedule_shifts(ctx context.Context, field graphql.CollectedField, obj *schedule.Schedule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Schedule", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + 
rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Schedule_shifts_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Schedule().Shifts(rctx, obj, args["start"].(time.Time), args["end"].(time.Time)) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]oncall.Shift) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNOnCallShift2ᚕgithub.comᚋtargetᚋgoalertᚋoncallᚐShift(ctx, field.Selections, res) +} + +func (ec *executionContext) _Schedule_targets(ctx context.Context, field graphql.CollectedField, obj *schedule.Schedule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Schedule", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Schedule().Targets(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]ScheduleTarget) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNScheduleTarget2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTarget(ctx, field.Selections, res) +} + +func (ec *executionContext) _Schedule_target(ctx context.Context, field graphql.CollectedField, obj *schedule.Schedule) graphql.Marshaler { + ctx = 
ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Schedule", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Schedule_target_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Schedule().Target(rctx, obj, args["input"].(assignment.RawTarget)) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*ScheduleTarget) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOScheduleTarget2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTarget(ctx, field.Selections, res) +} + +func (ec *executionContext) _ScheduleConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *ScheduleConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ScheduleConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Nodes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]schedule.Schedule) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNSchedule2ᚕgithub.comᚋtargetᚋgoalertᚋscheduleᚐSchedule(ctx, field.Selections, 
res) +} + +func (ec *executionContext) _ScheduleConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *ScheduleConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ScheduleConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(PageInfo) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) _ScheduleRule_id(ctx context.Context, field graphql.CollectedField, obj *rule.Rule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ScheduleRule", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _ScheduleRule_scheduleID(ctx context.Context, field 
graphql.CollectedField, obj *rule.Rule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ScheduleRule", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ScheduleID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _ScheduleRule_start(ctx context.Context, field graphql.CollectedField, obj *rule.Rule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ScheduleRule", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Start, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(rule.Clock) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNClockTime2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx, field.Selections, res) +} + +func (ec *executionContext) _ScheduleRule_end(ctx context.Context, field graphql.CollectedField, obj *rule.Rule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { 
ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ScheduleRule", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.End, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(rule.Clock) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNClockTime2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx, field.Selections, res) +} + +func (ec *executionContext) _ScheduleRule_weekdayFilter(ctx context.Context, field graphql.CollectedField, obj *rule.Rule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ScheduleRule", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.ScheduleRule().WeekdayFilter(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2ᚕbool(ctx, field.Selections, res) +} + +func (ec *executionContext) _ScheduleRule_target(ctx context.Context, field graphql.CollectedField, obj *rule.Rule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + 
Object: "ScheduleRule", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.ScheduleRule().Target(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*assignment.RawTarget) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNTarget2ᚖgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, field.Selections, res) +} + +func (ec *executionContext) _ScheduleTarget_scheduleID(ctx context.Context, field graphql.CollectedField, obj *ScheduleTarget) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ScheduleTarget", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ScheduleID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _ScheduleTarget_target(ctx context.Context, field graphql.CollectedField, obj *ScheduleTarget) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ScheduleTarget", + Field: field, + Args: nil, + 
IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Target, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(assignment.RawTarget) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNTarget2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, field.Selections, res) +} + +func (ec *executionContext) _ScheduleTarget_rules(ctx context.Context, field graphql.CollectedField, obj *ScheduleTarget) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ScheduleTarget", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Rules, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]rule.Rule) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNScheduleRule2ᚕgithub.comᚋtargetᚋgoalertᚋscheduleᚋruleᚐRule(ctx, field.Selections, res) +} + +func (ec *executionContext) _Service_id(ctx context.Context, field graphql.CollectedField, obj *service.Service) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Service", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, 
rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Service_name(ctx context.Context, field graphql.CollectedField, obj *service.Service) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Service", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Service_description(ctx context.Context, field graphql.CollectedField, obj *service.Service) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Service", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context 
from middleware stack in children + return obj.Description, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Service_escalationPolicyID(ctx context.Context, field graphql.CollectedField, obj *service.Service) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Service", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.EscalationPolicyID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Service_escalationPolicy(ctx context.Context, field graphql.CollectedField, obj *service.Service) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Service", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Service().EscalationPolicy(rctx, obj) + }) + if resTmp == nil { + return graphql.Null 
+ } + res := resTmp.(*escalation.Policy) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOEscalationPolicy2ᚖgithub.comᚋtargetᚋgoalertᚋescalationᚐPolicy(ctx, field.Selections, res) +} + +func (ec *executionContext) _Service_isFavorite(ctx context.Context, field graphql.CollectedField, obj *service.Service) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Service", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Service().IsFavorite(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) _Service_onCallUsers(ctx context.Context, field graphql.CollectedField, obj *service.Service) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Service", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Service().OnCallUsers(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]oncall.ServiceOnCallUser) 
+ rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNServiceOnCallUser2ᚕgithub.comᚋtargetᚋgoalertᚋoncallᚐServiceOnCallUser(ctx, field.Selections, res) +} + +func (ec *executionContext) _Service_integrationKeys(ctx context.Context, field graphql.CollectedField, obj *service.Service) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Service", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Service().IntegrationKeys(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]integrationkey.IntegrationKey) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNIntegrationKey2ᚕgithub.comᚋtargetᚋgoalertᚋintegrationkeyᚐIntegrationKey(ctx, field.Selections, res) +} + +func (ec *executionContext) _Service_labels(ctx context.Context, field graphql.CollectedField, obj *service.Service) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Service", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Service().Labels(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return 
graphql.Null + } + res := resTmp.([]label.Label) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNLabel2ᚕgithub.comᚋtargetᚋgoalertᚋlabelᚐLabel(ctx, field.Selections, res) +} + +func (ec *executionContext) _ServiceConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *ServiceConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ServiceConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Nodes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]service.Service) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNService2ᚕgithub.comᚋtargetᚋgoalertᚋserviceᚐService(ctx, field.Selections, res) +} + +func (ec *executionContext) _ServiceConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *ServiceConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ServiceConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := 
resTmp.(PageInfo) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) _ServiceOnCallUser_userID(ctx context.Context, field graphql.CollectedField, obj *oncall.ServiceOnCallUser) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ServiceOnCallUser", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.UserID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _ServiceOnCallUser_userName(ctx context.Context, field graphql.CollectedField, obj *oncall.ServiceOnCallUser) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ServiceOnCallUser", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.UserName, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = 
ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _ServiceOnCallUser_stepNumber(ctx context.Context, field graphql.CollectedField, obj *oncall.ServiceOnCallUser) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "ServiceOnCallUser", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.StepNumber, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _SlackChannel_id(ctx context.Context, field graphql.CollectedField, obj *slack.Channel) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "SlackChannel", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) 
_SlackChannel_name(ctx context.Context, field graphql.CollectedField, obj *slack.Channel) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "SlackChannel", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _SlackChannelConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *SlackChannelConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "SlackChannelConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Nodes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]slack.Channel) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNSlackChannel2ᚕgithub.comᚋtargetᚋgoalertᚋnotificationᚋslackᚐChannel(ctx, field.Selections, res) +} + +func (ec *executionContext) _SlackChannelConnection_pageInfo(ctx context.Context, field 
graphql.CollectedField, obj *SlackChannelConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "SlackChannelConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(PageInfo) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) _Target_id(ctx context.Context, field graphql.CollectedField, obj *assignment.RawTarget) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Target", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Target_type(ctx context.Context, field graphql.CollectedField, obj *assignment.RawTarget) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + 
defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Target", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Type, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(assignment.TargetType) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNTargetType2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐTargetType(ctx, field.Selections, res) +} + +func (ec *executionContext) _Target_name(ctx context.Context, field graphql.CollectedField, obj *assignment.RawTarget) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "Target", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Target().Name(rctx, obj) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) _TimeZone_id(ctx context.Context, field graphql.CollectedField, obj *TimeZone) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "TimeZone", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = 
graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _TimeZoneConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *TimeZoneConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "TimeZoneConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Nodes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]TimeZone) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNTimeZone2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐTimeZone(ctx, field.Selections, res) +} + +func (ec *executionContext) _TimeZoneConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *TimeZoneConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "TimeZoneConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = 
ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(PageInfo) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) _User_id(ctx context.Context, field graphql.CollectedField, obj *user.User) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "User", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _User_role(ctx context.Context, field graphql.CollectedField, obj *user.User) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "User", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context 
from middleware stack in children + return ec.resolvers.User().Role(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(UserRole) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNUserRole2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserRole(ctx, field.Selections, res) +} + +func (ec *executionContext) _User_name(ctx context.Context, field graphql.CollectedField, obj *user.User) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "User", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _User_email(ctx context.Context, field graphql.CollectedField, obj *user.User) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "User", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Email, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be 
null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _User_contactMethods(ctx context.Context, field graphql.CollectedField, obj *user.User) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "User", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.User().ContactMethods(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]contactmethod.ContactMethod) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNUserContactMethod2ᚕgithub.comᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐContactMethod(ctx, field.Selections, res) +} + +func (ec *executionContext) _User_notificationRules(ctx context.Context, field graphql.CollectedField, obj *user.User) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "User", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.User().NotificationRules(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return 
graphql.Null + } + res := resTmp.([]notificationrule.NotificationRule) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNUserNotificationRule2ᚕgithub.comᚋtargetᚋgoalertᚋuserᚋnotificationruleᚐNotificationRule(ctx, field.Selections, res) +} + +func (ec *executionContext) _User_statusUpdateContactMethodID(ctx context.Context, field graphql.CollectedField, obj *user.User) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "User", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.AlertStatusCMID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _User_authSubjects(ctx context.Context, field graphql.CollectedField, obj *user.User) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "User", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.User().AuthSubjects(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := 
resTmp.([]user.AuthSubject) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNAuthSubject2ᚕgithub.comᚋtargetᚋgoalertᚋuserᚐAuthSubject(ctx, field.Selections, res) +} + +func (ec *executionContext) _User_onCallSteps(ctx context.Context, field graphql.CollectedField, obj *user.User) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "User", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.User().OnCallSteps(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]escalation.Step) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNEscalationPolicyStep2ᚕgithub.comᚋtargetᚋgoalertᚋescalationᚐStep(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *UserConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Nodes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]user.User) + 
rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNUser2ᚕgithub.comᚋtargetᚋgoalertᚋuserᚐUser(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *UserConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(PageInfo) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserContactMethod_id(ctx context.Context, field graphql.CollectedField, obj *contactmethod.ContactMethod) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserContactMethod", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = 
ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserContactMethod_type(ctx context.Context, field graphql.CollectedField, obj *contactmethod.ContactMethod) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserContactMethod", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Type, nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(contactmethod.Type) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOContactMethodType2githubᚗcomᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserContactMethod_name(ctx context.Context, field graphql.CollectedField, obj *contactmethod.ContactMethod) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserContactMethod", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec 
*executionContext) _UserContactMethod_value(ctx context.Context, field graphql.CollectedField, obj *contactmethod.ContactMethod) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserContactMethod", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Value, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserNotificationRule_id(ctx context.Context, field graphql.CollectedField, obj *notificationrule.NotificationRule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserNotificationRule", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserNotificationRule_delayMinutes(ctx context.Context, field graphql.CollectedField, obj 
*notificationrule.NotificationRule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserNotificationRule", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DelayMinutes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserNotificationRule_contactMethodID(ctx context.Context, field graphql.CollectedField, obj *notificationrule.NotificationRule) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserNotificationRule", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ContactMethodID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserNotificationRule_contactMethod(ctx context.Context, field graphql.CollectedField, obj *notificationrule.NotificationRule) graphql.Marshaler { + ctx = 
ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserNotificationRule", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.UserNotificationRule().ContactMethod(rctx, obj) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*contactmethod.ContactMethod) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOUserContactMethod2ᚖgithub.comᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐContactMethod(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserOverride_id(ctx context.Context, field graphql.CollectedField, obj *override.UserOverride) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserOverride", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserOverride_start(ctx context.Context, field graphql.CollectedField, obj *override.UserOverride) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { 
ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserOverride", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Start, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(time.Time) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNISOTimestamp2timeᚐTime(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserOverride_end(ctx context.Context, field graphql.CollectedField, obj *override.UserOverride) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserOverride", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.End, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(time.Time) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNISOTimestamp2timeᚐTime(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserOverride_addUserID(ctx context.Context, field graphql.CollectedField, obj *override.UserOverride) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserOverride", + Field: field, + Args: 
nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.AddUserID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserOverride_removeUserID(ctx context.Context, field graphql.CollectedField, obj *override.UserOverride) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserOverride", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.RemoveUserID, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserOverride_addUser(ctx context.Context, field graphql.CollectedField, obj *override.UserOverride) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserOverride", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + 
resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.UserOverride().AddUser(rctx, obj) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*user.User) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOUser2ᚖgithub.comᚋtargetᚋgoalertᚋuserᚐUser(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserOverride_removeUser(ctx context.Context, field graphql.CollectedField, obj *override.UserOverride) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserOverride", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.UserOverride().RemoveUser(rctx, obj) + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*user.User) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOUser2ᚖgithub.comᚋtargetᚋgoalertᚋuserᚐUser(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserOverride_target(ctx context.Context, field graphql.CollectedField, obj *override.UserOverride) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserOverride", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack 
in children + return ec.resolvers.UserOverride().Target(rctx, obj) + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*assignment.RawTarget) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNTarget2ᚖgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserOverrideConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *UserOverrideConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserOverrideConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Nodes, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]override.UserOverride) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNUserOverride2ᚕgithub.comᚋtargetᚋgoalertᚋoverrideᚐUserOverride(ctx, field.Selections, res) +} + +func (ec *executionContext) _UserOverrideConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *UserOverrideConnection) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "UserOverrideConnection", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, 
error) { + ctx = rctx // use context from middleware stack in children + return obj.PageInfo, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(PageInfo) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Directive", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Directive_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Directive", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description, 
nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Directive_locations(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Directive", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Locations, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalN__DirectiveLocation2ᚕstring(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Directive_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Directive", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Args, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]introspection.InputValue) + rctx.Result = res + 
ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalN__InputValue2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, field.Selections, res) +} + +func (ec *executionContext) ___EnumValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__EnumValue", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) ___EnumValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__EnumValue", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description, nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec 
*executionContext) ___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__EnumValue", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.IsDeprecated(), nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) ___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__EnumValue", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DeprecationReason(), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Field_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, 
field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Field", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Field_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Field", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description, nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Field_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Field", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = 
ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Args, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]introspection.InputValue) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalN__InputValue2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Field_type(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Field", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Type, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*introspection.Type) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalN__Type2ᚖgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Field_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Field", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = 
ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.IsDeprecated(), nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(bool) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Field_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Field", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DeprecationReason(), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) ___InputValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__InputValue", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack 
in children + return obj.Name, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) ___InputValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__InputValue", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description, nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) ___InputValue_type(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__InputValue", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Type, nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*introspection.Type) + 
rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalN__Type2ᚖgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) ___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__InputValue", + Field: field, + Args: nil, + IsMethod: false, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.DefaultValue, nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Schema_types(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Schema", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Types(), nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]introspection.Type) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return 
ec.marshalN__Type2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Schema_queryType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Schema", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.QueryType(), nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*introspection.Type) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalN__Type2ᚖgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Schema_mutationType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Schema", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MutationType(), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Type) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return 
ec.marshalO__Type2ᚖgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Schema", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.SubscriptionType(), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Type) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalO__Type2ᚖgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Schema_directives(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Schema", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Directives(), nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]introspection.Directive) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return 
ec.marshalN__Directive2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Type_kind(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Type", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Kind(), nil + }) + if resTmp == nil { + if !ec.HasError(rctx) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalN__TypeKind2string(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Type_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Type", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name(), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Type_description(ctx context.Context, field graphql.CollectedField, obj 
*introspection.Type) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Type", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Description(), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalOString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Type_fields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Type", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field___Type_fields_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Fields(args["includeDeprecated"].(bool)), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]introspection.Field) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalO__Field2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Type_interfaces(ctx context.Context, field graphql.CollectedField, obj 
*introspection.Type) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Type", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Interfaces(), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]introspection.Type) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalO__Type2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Type_possibleTypes(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Type", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PossibleTypes(), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]introspection.Type) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalO__Type2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Type_enumValues(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx 
:= &graphql.ResolverContext{ + Object: "__Type", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field___Type_enumValues_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + rctx.Args = args + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.EnumValues(args["includeDeprecated"].(bool)), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]introspection.EnumValue) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalO__EnumValue2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Type_inputFields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { + ctx = ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Type", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.InputFields(), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]introspection.InputValue) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalO__InputValue2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, field.Selections, res) +} + +func (ec *executionContext) ___Type_ofType(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { + ctx = 
ec.Tracer.StartFieldExecution(ctx, field) + defer func() { ec.Tracer.EndFieldExecution(ctx) }() + rctx := &graphql.ResolverContext{ + Object: "__Type", + Field: field, + Args: nil, + IsMethod: true, + } + ctx = graphql.WithResolverContext(ctx, rctx) + ctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx) + resTmp := ec.FieldMiddleware(ctx, obj, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.OfType(), nil + }) + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Type) + rctx.Result = res + ctx = ec.Tracer.StartFieldChildExecution(ctx) + return ec.marshalO__Type2ᚖgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +// endregion **************************** field.gotpl ***************************** + +// region **************************** input.gotpl ***************************** + +func (ec *executionContext) unmarshalInputAlertSearchOptions(ctx context.Context, v interface{}) (AlertSearchOptions, error) { + var it AlertSearchOptions + var asMap = v.(map[string]interface{}) + + if _, present := asMap["first"]; !present { + asMap["first"] = 15 + } + + for k, v := range asMap { + switch k { + case "filterByStatus": + var err error + it.FilterByStatus, err = ec.unmarshalOAlertStatus2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐAlertStatus(ctx, v) + if err != nil { + return it, err + } + case "filterByServiceID": + var err error + it.FilterByServiceID, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + case "search": + var err error + it.Search, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "first": + var err error + it.First, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "after": + var err error + it.After, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "favoritesOnly": + var err error + 
it.FavoritesOnly, err = ec.unmarshalOBoolean2ᚖbool(ctx, v) + if err != nil { + return it, err + } + case "omit": + var err error + it.Omit, err = ec.unmarshalOInt2ᚕint(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputAuthSubjectInput(ctx context.Context, v interface{}) (user.AuthSubject, error) { + var it user.AuthSubject + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "userID": + var err error + it.UserID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "providerID": + var err error + it.ProviderID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "subjectID": + var err error + it.SubjectID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputConfigValueInput(ctx context.Context, v interface{}) (ConfigValueInput, error) { + var it ConfigValueInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "id": + var err error + it.ID, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + case "value": + var err error + it.Value, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputCreateEscalationPolicyInput(ctx context.Context, v interface{}) (CreateEscalationPolicyInput, error) { + var it CreateEscalationPolicyInput + var asMap = v.(map[string]interface{}) + + if _, present := asMap["repeat"]; !present { + asMap["repeat"] = 3 + } + + for k, v := range asMap { + switch k { + case "name": + var err error + it.Name, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + case "description": + var err error + it.Description, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + 
} + case "repeat": + var err error + it.Repeat, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "steps": + var err error + it.Steps, err = ec.unmarshalOCreateEscalationPolicyStepInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateEscalationPolicyStepInput(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputCreateEscalationPolicyStepInput(ctx context.Context, v interface{}) (CreateEscalationPolicyStepInput, error) { + var it CreateEscalationPolicyStepInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "escalationPolicyID": + var err error + it.EscalationPolicyID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "delayMinutes": + var err error + it.DelayMinutes, err = ec.unmarshalNInt2int(ctx, v) + if err != nil { + return it, err + } + case "targets": + var err error + it.Targets, err = ec.unmarshalOTargetInput2ᚕgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, v) + if err != nil { + return it, err + } + case "newRotation": + var err error + it.NewRotation, err = ec.unmarshalOCreateRotationInput2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateRotationInput(ctx, v) + if err != nil { + return it, err + } + case "newSchedule": + var err error + it.NewSchedule, err = ec.unmarshalOCreateScheduleInput2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateScheduleInput(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputCreateIntegrationKeyInput(ctx context.Context, v interface{}) (CreateIntegrationKeyInput, error) { + var it CreateIntegrationKeyInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "serviceID": + var err error + it.ServiceID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "type": + var err error + it.Type, err = 
ec.unmarshalNIntegrationKeyType2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐIntegrationKeyType(ctx, v) + if err != nil { + return it, err + } + case "name": + var err error + it.Name, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputCreateRotationInput(ctx context.Context, v interface{}) (CreateRotationInput, error) { + var it CreateRotationInput + var asMap = v.(map[string]interface{}) + + if _, present := asMap["shiftLength"]; !present { + asMap["shiftLength"] = 1 + } + + for k, v := range asMap { + switch k { + case "name": + var err error + it.Name, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + case "description": + var err error + it.Description, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "timeZone": + var err error + it.TimeZone, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + case "start": + var err error + it.Start, err = ec.unmarshalNISOTimestamp2timeᚐTime(ctx, v) + if err != nil { + return it, err + } + case "type": + var err error + it.Type, err = ec.unmarshalNRotationType2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋrotationᚐType(ctx, v) + if err != nil { + return it, err + } + case "shiftLength": + var err error + it.ShiftLength, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "userIDs": + var err error + it.UserIDs, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputCreateScheduleInput(ctx context.Context, v interface{}) (CreateScheduleInput, error) { + var it CreateScheduleInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "name": + var err error + it.Name, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + case "description": + var err error + 
it.Description, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "timeZone": + var err error + it.TimeZone, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + case "targets": + var err error + it.Targets, err = ec.unmarshalOScheduleTargetInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTargetInput(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputCreateServiceInput(ctx context.Context, v interface{}) (CreateServiceInput, error) { + var it CreateServiceInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "name": + var err error + it.Name, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + case "description": + var err error + it.Description, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "escalationPolicyID": + var err error + it.EscalationPolicyID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "newEscalationPolicy": + var err error + it.NewEscalationPolicy, err = ec.unmarshalOCreateEscalationPolicyInput2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateEscalationPolicyInput(ctx, v) + if err != nil { + return it, err + } + case "newIntegrationKeys": + var err error + it.NewIntegrationKeys, err = ec.unmarshalOCreateIntegrationKeyInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateIntegrationKeyInput(ctx, v) + if err != nil { + return it, err + } + case "labels": + var err error + it.Labels, err = ec.unmarshalOSetLabelInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐSetLabelInput(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputCreateUserContactMethodInput(ctx context.Context, v interface{}) (CreateUserContactMethodInput, error) { + var it CreateUserContactMethodInput + var asMap = v.(map[string]interface{}) + + 
for k, v := range asMap { + switch k { + case "userID": + var err error + it.UserID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "type": + var err error + it.Type, err = ec.unmarshalNContactMethodType2githubᚗcomᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐType(ctx, v) + if err != nil { + return it, err + } + case "name": + var err error + it.Name, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + case "value": + var err error + it.Value, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + case "newUserNotificationRule": + var err error + it.NewUserNotificationRule, err = ec.unmarshalOCreateUserNotificationRuleInput2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateUserNotificationRuleInput(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputCreateUserNotificationRuleInput(ctx context.Context, v interface{}) (CreateUserNotificationRuleInput, error) { + var it CreateUserNotificationRuleInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "userID": + var err error + it.UserID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "contactMethodID": + var err error + it.ContactMethodID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "delayMinutes": + var err error + it.DelayMinutes, err = ec.unmarshalNInt2int(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputCreateUserOverrideInput(ctx context.Context, v interface{}) (CreateUserOverrideInput, error) { + var it CreateUserOverrideInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "scheduleID": + var err error + it.ScheduleID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "start": + var err error + it.Start, 
err = ec.unmarshalNISOTimestamp2timeᚐTime(ctx, v) + if err != nil { + return it, err + } + case "end": + var err error + it.End, err = ec.unmarshalNISOTimestamp2timeᚐTime(ctx, v) + if err != nil { + return it, err + } + case "addUserID": + var err error + it.AddUserID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "removeUserID": + var err error + it.RemoveUserID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputEscalationPolicySearchOptions(ctx context.Context, v interface{}) (EscalationPolicySearchOptions, error) { + var it EscalationPolicySearchOptions + var asMap = v.(map[string]interface{}) + + if _, present := asMap["first"]; !present { + asMap["first"] = 15 + } + + for k, v := range asMap { + switch k { + case "first": + var err error + it.First, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "after": + var err error + it.After, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "search": + var err error + it.Search, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "omit": + var err error + it.Omit, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputLabelSearchOptions(ctx context.Context, v interface{}) (LabelSearchOptions, error) { + var it LabelSearchOptions + var asMap = v.(map[string]interface{}) + + if _, present := asMap["first"]; !present { + asMap["first"] = 15 + } + + for k, v := range asMap { + switch k { + case "first": + var err error + it.First, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "after": + var err error + it.After, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "search": + var err error + it.Search, err = 
ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "uniqueKeys": + var err error + it.UniqueKeys, err = ec.unmarshalOBoolean2ᚖbool(ctx, v) + if err != nil { + return it, err + } + case "omit": + var err error + it.Omit, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputRotationSearchOptions(ctx context.Context, v interface{}) (RotationSearchOptions, error) { + var it RotationSearchOptions + var asMap = v.(map[string]interface{}) + + if _, present := asMap["first"]; !present { + asMap["first"] = 15 + } + + for k, v := range asMap { + switch k { + case "first": + var err error + it.First, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "after": + var err error + it.After, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "search": + var err error + it.Search, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "omit": + var err error + it.Omit, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputScheduleRuleInput(ctx context.Context, v interface{}) (ScheduleRuleInput, error) { + var it ScheduleRuleInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "id": + var err error + it.ID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "start": + var err error + it.Start, err = ec.unmarshalOClockTime2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx, v) + if err != nil { + return it, err + } + case "end": + var err error + it.End, err = ec.unmarshalOClockTime2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx, v) + if err != nil { + return it, err + } + case "weekdayFilter": + var err error + it.WeekdayFilter, err = ec.unmarshalOBoolean2ᚕbool(ctx, v) + if 
err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputScheduleSearchOptions(ctx context.Context, v interface{}) (ScheduleSearchOptions, error) { + var it ScheduleSearchOptions + var asMap = v.(map[string]interface{}) + + if _, present := asMap["first"]; !present { + asMap["first"] = 15 + } + + for k, v := range asMap { + switch k { + case "first": + var err error + it.First, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "after": + var err error + it.After, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "search": + var err error + it.Search, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "omit": + var err error + it.Omit, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputScheduleTargetInput(ctx context.Context, v interface{}) (ScheduleTargetInput, error) { + var it ScheduleTargetInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "scheduleID": + var err error + it.ScheduleID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "target": + var err error + it.Target, err = ec.unmarshalOTargetInput2ᚖgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, v) + if err != nil { + return it, err + } + case "newRotation": + var err error + it.NewRotation, err = ec.unmarshalOCreateRotationInput2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateRotationInput(ctx, v) + if err != nil { + return it, err + } + case "rules": + var err error + it.Rules, err = ec.unmarshalNScheduleRuleInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleRuleInput(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputServiceSearchOptions(ctx context.Context, v interface{}) 
(ServiceSearchOptions, error) { + var it ServiceSearchOptions + var asMap = v.(map[string]interface{}) + + if _, present := asMap["first"]; !present { + asMap["first"] = 15 + } + + for k, v := range asMap { + switch k { + case "first": + var err error + it.First, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "after": + var err error + it.After, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "search": + var err error + it.Search, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "omit": + var err error + it.Omit, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + case "favoritesOnly": + var err error + it.FavoritesOnly, err = ec.unmarshalOBoolean2ᚖbool(ctx, v) + if err != nil { + return it, err + } + case "favoritesFirst": + var err error + it.FavoritesFirst, err = ec.unmarshalOBoolean2ᚖbool(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputSetFavoriteInput(ctx context.Context, v interface{}) (SetFavoriteInput, error) { + var it SetFavoriteInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "target": + var err error + it.Target, err = ec.unmarshalNTargetInput2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, v) + if err != nil { + return it, err + } + case "favorite": + var err error + it.Favorite, err = ec.unmarshalNBoolean2bool(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputSetLabelInput(ctx context.Context, v interface{}) (SetLabelInput, error) { + var it SetLabelInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "target": + var err error + it.Target, err = ec.unmarshalOTargetInput2ᚖgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, v) + if err != nil { + return it, err + } + case 
"key": + var err error + it.Key, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + case "value": + var err error + it.Value, err = ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputSlackChannelSearchOptions(ctx context.Context, v interface{}) (SlackChannelSearchOptions, error) { + var it SlackChannelSearchOptions + var asMap = v.(map[string]interface{}) + + if _, present := asMap["first"]; !present { + asMap["first"] = 15 + } + + for k, v := range asMap { + switch k { + case "first": + var err error + it.First, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "after": + var err error + it.After, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "search": + var err error + it.Search, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "omit": + var err error + it.Omit, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputTargetInput(ctx context.Context, v interface{}) (assignment.RawTarget, error) { + var it assignment.RawTarget + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "id": + var err error + it.ID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "type": + var err error + it.Type, err = ec.unmarshalNTargetType2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐTargetType(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputTimeZoneSearchOptions(ctx context.Context, v interface{}) (TimeZoneSearchOptions, error) { + var it TimeZoneSearchOptions + var asMap = v.(map[string]interface{}) + + if _, present := asMap["first"]; !present { + asMap["first"] = 15 + } + + for k, v := range asMap { + switch k { 
+ case "first": + var err error + it.First, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "after": + var err error + it.After, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "search": + var err error + it.Search, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "omit": + var err error + it.Omit, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputUpdateAlertsInput(ctx context.Context, v interface{}) (UpdateAlertsInput, error) { + var it UpdateAlertsInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "alertIDs": + var err error + it.AlertIDs, err = ec.unmarshalNInt2ᚕint(ctx, v) + if err != nil { + return it, err + } + case "newStatus": + var err error + it.NewStatus, err = ec.unmarshalNAlertStatus2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐAlertStatus(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputUpdateEscalationPolicyInput(ctx context.Context, v interface{}) (UpdateEscalationPolicyInput, error) { + var it UpdateEscalationPolicyInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "id": + var err error + it.ID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "name": + var err error + it.Name, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "description": + var err error + it.Description, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "repeat": + var err error + it.Repeat, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "stepIDs": + var err error + it.StepIDs, err = ec.unmarshalOString2ᚕstring(ctx, v) + if err != nil { + return it, err + } + } + } + + 
return it, nil +} + +func (ec *executionContext) unmarshalInputUpdateEscalationPolicyStepInput(ctx context.Context, v interface{}) (UpdateEscalationPolicyStepInput, error) { + var it UpdateEscalationPolicyStepInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "id": + var err error + it.ID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "delayMinutes": + var err error + it.DelayMinutes, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "targets": + var err error + it.Targets, err = ec.unmarshalOTargetInput2ᚕgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputUpdateRotationInput(ctx context.Context, v interface{}) (UpdateRotationInput, error) { + var it UpdateRotationInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "id": + var err error + it.ID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "name": + var err error + it.Name, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "description": + var err error + it.Description, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "timeZone": + var err error + it.TimeZone, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "start": + var err error + it.Start, err = ec.unmarshalOISOTimestamp2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + case "type": + var err error + it.Type, err = ec.unmarshalORotationType2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚋrotationᚐType(ctx, v) + if err != nil { + return it, err + } + case "shiftLength": + var err error + it.ShiftLength, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "activeUserIndex": + var err error + 
it.ActiveUserIndex, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "userIDs": + var err error + it.UserIDs, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputUpdateScheduleInput(ctx context.Context, v interface{}) (UpdateScheduleInput, error) { + var it UpdateScheduleInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "id": + var err error + it.ID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "name": + var err error + it.Name, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "description": + var err error + it.Description, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "timeZone": + var err error + it.TimeZone, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputUpdateServiceInput(ctx context.Context, v interface{}) (UpdateServiceInput, error) { + var it UpdateServiceInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "id": + var err error + it.ID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "name": + var err error + it.Name, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "description": + var err error + it.Description, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "escalationPolicyID": + var err error + it.EscalationPolicyID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputUpdateUserContactMethodInput(ctx context.Context, v interface{}) (UpdateUserContactMethodInput, error) { + var it 
UpdateUserContactMethodInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "id": + var err error + it.ID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "name": + var err error + it.Name, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "value": + var err error + it.Value, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputUpdateUserInput(ctx context.Context, v interface{}) (UpdateUserInput, error) { + var it UpdateUserInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "id": + var err error + it.ID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "name": + var err error + it.Name, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "email": + var err error + it.Email, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "role": + var err error + it.Role, err = ec.unmarshalOUserRole2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐUserRole(ctx, v) + if err != nil { + return it, err + } + case "statusUpdateContactMethodID": + var err error + it.StatusUpdateContactMethodID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputUpdateUserOverrideInput(ctx context.Context, v interface{}) (UpdateUserOverrideInput, error) { + var it UpdateUserOverrideInput + var asMap = v.(map[string]interface{}) + + for k, v := range asMap { + switch k { + case "id": + var err error + it.ID, err = ec.unmarshalNID2string(ctx, v) + if err != nil { + return it, err + } + case "start": + var err error + it.Start, err = ec.unmarshalOISOTimestamp2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + case "end": + var err error + 
it.End, err = ec.unmarshalOISOTimestamp2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + case "addUserID": + var err error + it.AddUserID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "removeUserID": + var err error + it.RemoveUserID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputUserOverrideSearchOptions(ctx context.Context, v interface{}) (UserOverrideSearchOptions, error) { + var it UserOverrideSearchOptions + var asMap = v.(map[string]interface{}) + + if _, present := asMap["first"]; !present { + asMap["first"] = 15 + } + + for k, v := range asMap { + switch k { + case "first": + var err error + it.First, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "after": + var err error + it.After, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "omit": + var err error + it.Omit, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + case "scheduleID": + var err error + it.ScheduleID, err = ec.unmarshalOID2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "filterAddUserID": + var err error + it.FilterAddUserID, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + case "filterRemoveUserID": + var err error + it.FilterRemoveUserID, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + case "filterAnyUserID": + var err error + it.FilterAnyUserID, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + case "start": + var err error + it.Start, err = ec.unmarshalOISOTimestamp2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + case "end": + var err error + it.End, err = ec.unmarshalOISOTimestamp2ᚖtimeᚐTime(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) 
unmarshalInputUserSearchOptions(ctx context.Context, v interface{}) (UserSearchOptions, error) { + var it UserSearchOptions + var asMap = v.(map[string]interface{}) + + if _, present := asMap["first"]; !present { + asMap["first"] = 15 + } + + for k, v := range asMap { + switch k { + case "first": + var err error + it.First, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "after": + var err error + it.After, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "search": + var err error + it.Search, err = ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + case "omit": + var err error + it.Omit, err = ec.unmarshalOID2ᚕstring(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +// endregion **************************** input.gotpl ***************************** + +// region ************************** interface.gotpl *************************** + +// endregion ************************** interface.gotpl *************************** + +// region **************************** object.gotpl **************************** + +var alertImplementors = []string{"Alert"} + +func (ec *executionContext) _Alert(ctx context.Context, sel ast.SelectionSet, obj *alert.Alert) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, alertImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Alert") + case "id": + out.Values[i] = ec._Alert_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "alertID": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Alert_alertID(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "status": + field := 
field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Alert_status(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "summary": + out.Values[i] = ec._Alert_summary(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "details": + out.Values[i] = ec._Alert_details(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "createdAt": + out.Values[i] = ec._Alert_createdAt(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "serviceID": + out.Values[i] = ec._Alert_serviceID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "service": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Alert_service(ctx, field, obj) + return res + }) + case "state": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Alert_state(ctx, field, obj) + return res + }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var alertConnectionImplementors = []string{"AlertConnection"} + +func (ec *executionContext) _AlertConnection(ctx context.Context, sel ast.SelectionSet, obj *AlertConnection) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, alertConnectionImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("AlertConnection") + case "nodes": + out.Values[i] = ec._AlertConnection_nodes(ctx, field, obj) + if out.Values[i] == graphql.Null 
{ + invalid = true + } + case "pageInfo": + out.Values[i] = ec._AlertConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var alertStateImplementors = []string{"AlertState"} + +func (ec *executionContext) _AlertState(ctx context.Context, sel ast.SelectionSet, obj *alert.State) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, alertStateImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("AlertState") + case "lastEscalation": + out.Values[i] = ec._AlertState_lastEscalation(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "stepNumber": + out.Values[i] = ec._AlertState_stepNumber(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "repeatCount": + out.Values[i] = ec._AlertState_repeatCount(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var authSubjectImplementors = []string{"AuthSubject"} + +func (ec *executionContext) _AuthSubject(ctx context.Context, sel ast.SelectionSet, obj *user.AuthSubject) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, authSubjectImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("AuthSubject") + case "providerID": + out.Values[i] = ec._AuthSubject_providerID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "subjectID": + out.Values[i] = ec._AuthSubject_subjectID(ctx, field, obj) + if 
out.Values[i] == graphql.Null { + invalid = true + } + case "userID": + out.Values[i] = ec._AuthSubject_userID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var authSubjectConnectionImplementors = []string{"AuthSubjectConnection"} + +func (ec *executionContext) _AuthSubjectConnection(ctx context.Context, sel ast.SelectionSet, obj *AuthSubjectConnection) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, authSubjectConnectionImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("AuthSubjectConnection") + case "nodes": + out.Values[i] = ec._AuthSubjectConnection_nodes(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "pageInfo": + out.Values[i] = ec._AuthSubjectConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var configValueImplementors = []string{"ConfigValue"} + +func (ec *executionContext) _ConfigValue(ctx context.Context, sel ast.SelectionSet, obj *ConfigValue) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, configValueImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("ConfigValue") + case "id": + out.Values[i] = ec._ConfigValue_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "description": + out.Values[i] = ec._ConfigValue_description(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "value": + 
out.Values[i] = ec._ConfigValue_value(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "type": + out.Values[i] = ec._ConfigValue_type(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "password": + out.Values[i] = ec._ConfigValue_password(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var escalationPolicyImplementors = []string{"EscalationPolicy"} + +func (ec *executionContext) _EscalationPolicy(ctx context.Context, sel ast.SelectionSet, obj *escalation.Policy) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, escalationPolicyImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("EscalationPolicy") + case "id": + out.Values[i] = ec._EscalationPolicy_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "name": + out.Values[i] = ec._EscalationPolicy_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "description": + out.Values[i] = ec._EscalationPolicy_description(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "repeat": + out.Values[i] = ec._EscalationPolicy_repeat(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "assignedTo": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._EscalationPolicy_assignedTo(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "steps": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + 
ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._EscalationPolicy_steps(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var escalationPolicyConnectionImplementors = []string{"EscalationPolicyConnection"} + +func (ec *executionContext) _EscalationPolicyConnection(ctx context.Context, sel ast.SelectionSet, obj *EscalationPolicyConnection) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, escalationPolicyConnectionImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("EscalationPolicyConnection") + case "nodes": + out.Values[i] = ec._EscalationPolicyConnection_nodes(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "pageInfo": + out.Values[i] = ec._EscalationPolicyConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var escalationPolicyStepImplementors = []string{"EscalationPolicyStep"} + +func (ec *executionContext) _EscalationPolicyStep(ctx context.Context, sel ast.SelectionSet, obj *escalation.Step) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, escalationPolicyStepImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("EscalationPolicyStep") + case "id": + out.Values[i] = ec._EscalationPolicyStep_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "stepNumber": + out.Values[i] = ec._EscalationPolicyStep_stepNumber(ctx, field, obj) 
+ if out.Values[i] == graphql.Null { + invalid = true + } + case "delayMinutes": + out.Values[i] = ec._EscalationPolicyStep_delayMinutes(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "targets": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._EscalationPolicyStep_targets(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "escalationPolicy": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._EscalationPolicyStep_escalationPolicy(ctx, field, obj) + return res + }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var integrationKeyImplementors = []string{"IntegrationKey"} + +func (ec *executionContext) _IntegrationKey(ctx context.Context, sel ast.SelectionSet, obj *integrationkey.IntegrationKey) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, integrationKeyImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("IntegrationKey") + case "id": + out.Values[i] = ec._IntegrationKey_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "serviceID": + out.Values[i] = ec._IntegrationKey_serviceID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "type": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._IntegrationKey_type(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case 
"name": + out.Values[i] = ec._IntegrationKey_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "href": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._IntegrationKey_href(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var labelImplementors = []string{"Label"} + +func (ec *executionContext) _Label(ctx context.Context, sel ast.SelectionSet, obj *label.Label) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, labelImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Label") + case "key": + out.Values[i] = ec._Label_key(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "value": + out.Values[i] = ec._Label_value(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var labelConnectionImplementors = []string{"LabelConnection"} + +func (ec *executionContext) _LabelConnection(ctx context.Context, sel ast.SelectionSet, obj *LabelConnection) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, labelConnectionImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("LabelConnection") + case "nodes": + out.Values[i] = ec._LabelConnection_nodes(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "pageInfo": + 
out.Values[i] = ec._LabelConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var mutationImplementors = []string{"Mutation"} + +func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, mutationImplementors) + + ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{ + Object: "Mutation", + }) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Mutation") + case "addAuthSubject": + out.Values[i] = ec._Mutation_addAuthSubject(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "deleteAuthSubject": + out.Values[i] = ec._Mutation_deleteAuthSubject(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "updateUser": + out.Values[i] = ec._Mutation_updateUser(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "testContactMethod": + out.Values[i] = ec._Mutation_testContactMethod(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "updateAlerts": + out.Values[i] = ec._Mutation_updateAlerts(ctx, field) + case "updateRotation": + out.Values[i] = ec._Mutation_updateRotation(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "escalateAlerts": + out.Values[i] = ec._Mutation_escalateAlerts(ctx, field) + case "setFavorite": + out.Values[i] = ec._Mutation_setFavorite(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "updateService": + out.Values[i] = ec._Mutation_updateService(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "updateEscalationPolicy": + out.Values[i] = 
ec._Mutation_updateEscalationPolicy(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "updateEscalationPolicyStep": + out.Values[i] = ec._Mutation_updateEscalationPolicyStep(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "deleteAll": + out.Values[i] = ec._Mutation_deleteAll(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "createService": + out.Values[i] = ec._Mutation_createService(ctx, field) + case "createEscalationPolicy": + out.Values[i] = ec._Mutation_createEscalationPolicy(ctx, field) + case "createEscalationPolicyStep": + out.Values[i] = ec._Mutation_createEscalationPolicyStep(ctx, field) + case "createRotation": + out.Values[i] = ec._Mutation_createRotation(ctx, field) + case "createIntegrationKey": + out.Values[i] = ec._Mutation_createIntegrationKey(ctx, field) + case "setLabel": + out.Values[i] = ec._Mutation_setLabel(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "createSchedule": + out.Values[i] = ec._Mutation_createSchedule(ctx, field) + case "updateScheduleTarget": + out.Values[i] = ec._Mutation_updateScheduleTarget(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "createUserOverride": + out.Values[i] = ec._Mutation_createUserOverride(ctx, field) + case "createUserContactMethod": + out.Values[i] = ec._Mutation_createUserContactMethod(ctx, field) + case "createUserNotificationRule": + out.Values[i] = ec._Mutation_createUserNotificationRule(ctx, field) + case "updateUserContactMethod": + out.Values[i] = ec._Mutation_updateUserContactMethod(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "updateSchedule": + out.Values[i] = ec._Mutation_updateSchedule(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "updateUserOverride": + out.Values[i] = ec._Mutation_updateUserOverride(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + case "setConfig": 
+ out.Values[i] = ec._Mutation_setConfig(ctx, field) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var onCallShiftImplementors = []string{"OnCallShift"} + +func (ec *executionContext) _OnCallShift(ctx context.Context, sel ast.SelectionSet, obj *oncall.Shift) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, onCallShiftImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("OnCallShift") + case "userID": + out.Values[i] = ec._OnCallShift_userID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "user": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._OnCallShift_user(ctx, field, obj) + return res + }) + case "start": + out.Values[i] = ec._OnCallShift_start(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "end": + out.Values[i] = ec._OnCallShift_end(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "truncated": + out.Values[i] = ec._OnCallShift_truncated(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var pageInfoImplementors = []string{"PageInfo"} + +func (ec *executionContext) _PageInfo(ctx context.Context, sel ast.SelectionSet, obj *PageInfo) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, pageInfoImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + 
out.Values[i] = graphql.MarshalString("PageInfo") + case "endCursor": + out.Values[i] = ec._PageInfo_endCursor(ctx, field, obj) + case "hasNextPage": + out.Values[i] = ec._PageInfo_hasNextPage(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var queryImplementors = []string{"Query"} + +func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, queryImplementors) + + ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{ + Object: "Query", + }) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Query") + case "user": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_user(ctx, field) + return res + }) + case "users": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_users(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "alert": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_alert(ctx, field) + return res + }) + case "alerts": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_alerts(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "service": + field := field + 
out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_service(ctx, field) + return res + }) + case "integrationKey": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_integrationKey(ctx, field) + return res + }) + case "services": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_services(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "rotation": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_rotation(ctx, field) + return res + }) + case "rotations": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_rotations(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "schedule": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_schedule(ctx, field) + return res + }) + case "schedules": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_schedules(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "escalationPolicy": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, 
ec.Recover(ctx, r)) + } + }() + res = ec._Query_escalationPolicy(ctx, field) + return res + }) + case "escalationPolicies": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_escalationPolicies(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "authSubjectsForProvider": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_authSubjectsForProvider(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "timeZones": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_timeZones(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "labels": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_labels(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "userOverrides": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_userOverrides(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "userOverride": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_userOverride(ctx, field) + return res + }) + case "config": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + 
ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_config(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "userContactMethod": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_userContactMethod(ctx, field) + return res + }) + case "slackChannels": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_slackChannels(ctx, field) + if res == graphql.Null { + invalid = true + } + return res + }) + case "slackChannel": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_slackChannel(ctx, field) + return res + }) + case "__type": + out.Values[i] = ec._Query___type(ctx, field) + case "__schema": + out.Values[i] = ec._Query___schema(ctx, field) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var rotationImplementors = []string{"Rotation"} + +func (ec *executionContext) _Rotation(ctx context.Context, sel ast.SelectionSet, obj *rotation.Rotation) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, rotationImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Rotation") + case "id": + out.Values[i] = ec._Rotation_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "name": + out.Values[i] = ec._Rotation_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "description": + out.Values[i] = ec._Rotation_description(ctx, field, obj) 
+ if out.Values[i] == graphql.Null { + invalid = true + } + case "start": + out.Values[i] = ec._Rotation_start(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "timeZone": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Rotation_timeZone(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "type": + out.Values[i] = ec._Rotation_type(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "shiftLength": + out.Values[i] = ec._Rotation_shiftLength(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "activeUserIndex": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Rotation_activeUserIndex(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "userIDs": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Rotation_userIDs(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "users": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Rotation_users(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "nextHandoffTimes": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Rotation_nextHandoffTimes(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + default: + panic("unknown 
field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var rotationConnectionImplementors = []string{"RotationConnection"} + +func (ec *executionContext) _RotationConnection(ctx context.Context, sel ast.SelectionSet, obj *RotationConnection) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, rotationConnectionImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("RotationConnection") + case "nodes": + out.Values[i] = ec._RotationConnection_nodes(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "pageInfo": + out.Values[i] = ec._RotationConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var scheduleImplementors = []string{"Schedule"} + +func (ec *executionContext) _Schedule(ctx context.Context, sel ast.SelectionSet, obj *schedule.Schedule) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, scheduleImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Schedule") + case "id": + out.Values[i] = ec._Schedule_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "name": + out.Values[i] = ec._Schedule_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "description": + out.Values[i] = ec._Schedule_description(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "timeZone": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + 
ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Schedule_timeZone(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "assignedTo": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Schedule_assignedTo(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "shifts": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Schedule_shifts(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "targets": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Schedule_targets(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "target": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Schedule_target(ctx, field, obj) + return res + }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var scheduleConnectionImplementors = []string{"ScheduleConnection"} + +func (ec *executionContext) _ScheduleConnection(ctx context.Context, sel ast.SelectionSet, obj *ScheduleConnection) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, scheduleConnectionImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("ScheduleConnection") + case "nodes": + out.Values[i] = ec._ScheduleConnection_nodes(ctx, 
field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "pageInfo": + out.Values[i] = ec._ScheduleConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var scheduleRuleImplementors = []string{"ScheduleRule"} + +func (ec *executionContext) _ScheduleRule(ctx context.Context, sel ast.SelectionSet, obj *rule.Rule) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, scheduleRuleImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("ScheduleRule") + case "id": + out.Values[i] = ec._ScheduleRule_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "scheduleID": + out.Values[i] = ec._ScheduleRule_scheduleID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "start": + out.Values[i] = ec._ScheduleRule_start(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "end": + out.Values[i] = ec._ScheduleRule_end(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "weekdayFilter": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._ScheduleRule_weekdayFilter(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "target": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._ScheduleRule_target(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + default: + panic("unknown field " + 
strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var scheduleTargetImplementors = []string{"ScheduleTarget"} + +func (ec *executionContext) _ScheduleTarget(ctx context.Context, sel ast.SelectionSet, obj *ScheduleTarget) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, scheduleTargetImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("ScheduleTarget") + case "scheduleID": + out.Values[i] = ec._ScheduleTarget_scheduleID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "target": + out.Values[i] = ec._ScheduleTarget_target(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "rules": + out.Values[i] = ec._ScheduleTarget_rules(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var serviceImplementors = []string{"Service"} + +func (ec *executionContext) _Service(ctx context.Context, sel ast.SelectionSet, obj *service.Service) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, serviceImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Service") + case "id": + out.Values[i] = ec._Service_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "name": + out.Values[i] = ec._Service_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "description": + out.Values[i] = ec._Service_description(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "escalationPolicyID": + out.Values[i] = 
ec._Service_escalationPolicyID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "escalationPolicy": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Service_escalationPolicy(ctx, field, obj) + return res + }) + case "isFavorite": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Service_isFavorite(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "onCallUsers": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Service_onCallUsers(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "integrationKeys": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Service_integrationKeys(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "labels": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Service_labels(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var serviceConnectionImplementors = []string{"ServiceConnection"} + +func (ec *executionContext) _ServiceConnection(ctx context.Context, sel ast.SelectionSet, obj *ServiceConnection) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, 
serviceConnectionImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("ServiceConnection") + case "nodes": + out.Values[i] = ec._ServiceConnection_nodes(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "pageInfo": + out.Values[i] = ec._ServiceConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var serviceOnCallUserImplementors = []string{"ServiceOnCallUser"} + +func (ec *executionContext) _ServiceOnCallUser(ctx context.Context, sel ast.SelectionSet, obj *oncall.ServiceOnCallUser) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, serviceOnCallUserImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("ServiceOnCallUser") + case "userID": + out.Values[i] = ec._ServiceOnCallUser_userID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "userName": + out.Values[i] = ec._ServiceOnCallUser_userName(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "stepNumber": + out.Values[i] = ec._ServiceOnCallUser_stepNumber(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var slackChannelImplementors = []string{"SlackChannel"} + +func (ec *executionContext) _SlackChannel(ctx context.Context, sel ast.SelectionSet, obj *slack.Channel) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, slackChannelImplementors) + + out := 
graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("SlackChannel") + case "id": + out.Values[i] = ec._SlackChannel_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "name": + out.Values[i] = ec._SlackChannel_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var slackChannelConnectionImplementors = []string{"SlackChannelConnection"} + +func (ec *executionContext) _SlackChannelConnection(ctx context.Context, sel ast.SelectionSet, obj *SlackChannelConnection) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, slackChannelConnectionImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("SlackChannelConnection") + case "nodes": + out.Values[i] = ec._SlackChannelConnection_nodes(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "pageInfo": + out.Values[i] = ec._SlackChannelConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var targetImplementors = []string{"Target"} + +func (ec *executionContext) _Target(ctx context.Context, sel ast.SelectionSet, obj *assignment.RawTarget) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, targetImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Target") + case "id": + out.Values[i] = ec._Target_id(ctx, 
field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "type": + out.Values[i] = ec._Target_type(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "name": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Target_name(ctx, field, obj) + return res + }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var timeZoneImplementors = []string{"TimeZone"} + +func (ec *executionContext) _TimeZone(ctx context.Context, sel ast.SelectionSet, obj *TimeZone) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, timeZoneImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("TimeZone") + case "id": + out.Values[i] = ec._TimeZone_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var timeZoneConnectionImplementors = []string{"TimeZoneConnection"} + +func (ec *executionContext) _TimeZoneConnection(ctx context.Context, sel ast.SelectionSet, obj *TimeZoneConnection) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, timeZoneConnectionImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("TimeZoneConnection") + case "nodes": + out.Values[i] = ec._TimeZoneConnection_nodes(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "pageInfo": + out.Values[i] = ec._TimeZoneConnection_pageInfo(ctx, field, obj) + if out.Values[i] 
== graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var userImplementors = []string{"User"} + +func (ec *executionContext) _User(ctx context.Context, sel ast.SelectionSet, obj *user.User) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, userImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("User") + case "id": + out.Values[i] = ec._User_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "role": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._User_role(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "name": + out.Values[i] = ec._User_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "email": + out.Values[i] = ec._User_email(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "contactMethods": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._User_contactMethods(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "notificationRules": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._User_notificationRules(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "statusUpdateContactMethodID": + out.Values[i] = ec._User_statusUpdateContactMethodID(ctx, field, obj) + if out.Values[i] == 
graphql.Null { + invalid = true + } + case "authSubjects": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._User_authSubjects(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + case "onCallSteps": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._User_onCallSteps(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var userConnectionImplementors = []string{"UserConnection"} + +func (ec *executionContext) _UserConnection(ctx context.Context, sel ast.SelectionSet, obj *UserConnection) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, userConnectionImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("UserConnection") + case "nodes": + out.Values[i] = ec._UserConnection_nodes(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "pageInfo": + out.Values[i] = ec._UserConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var userContactMethodImplementors = []string{"UserContactMethod"} + +func (ec *executionContext) _UserContactMethod(ctx context.Context, sel ast.SelectionSet, obj *contactmethod.ContactMethod) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, userContactMethodImplementors) + + out := graphql.NewFieldSet(fields) + 
invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("UserContactMethod") + case "id": + out.Values[i] = ec._UserContactMethod_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "type": + out.Values[i] = ec._UserContactMethod_type(ctx, field, obj) + case "name": + out.Values[i] = ec._UserContactMethod_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "value": + out.Values[i] = ec._UserContactMethod_value(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var userNotificationRuleImplementors = []string{"UserNotificationRule"} + +func (ec *executionContext) _UserNotificationRule(ctx context.Context, sel ast.SelectionSet, obj *notificationrule.NotificationRule) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, userNotificationRuleImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("UserNotificationRule") + case "id": + out.Values[i] = ec._UserNotificationRule_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "delayMinutes": + out.Values[i] = ec._UserNotificationRule_delayMinutes(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "contactMethodID": + out.Values[i] = ec._UserNotificationRule_contactMethodID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "contactMethod": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._UserNotificationRule_contactMethod(ctx, field, obj) + return 
res + }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var userOverrideImplementors = []string{"UserOverride"} + +func (ec *executionContext) _UserOverride(ctx context.Context, sel ast.SelectionSet, obj *override.UserOverride) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, userOverrideImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("UserOverride") + case "id": + out.Values[i] = ec._UserOverride_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "start": + out.Values[i] = ec._UserOverride_start(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "end": + out.Values[i] = ec._UserOverride_end(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "addUserID": + out.Values[i] = ec._UserOverride_addUserID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "removeUserID": + out.Values[i] = ec._UserOverride_removeUserID(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "addUser": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._UserOverride_addUser(ctx, field, obj) + return res + }) + case "removeUser": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._UserOverride_removeUser(ctx, field, obj) + return res + }) + case "target": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = 
ec._UserOverride_target(ctx, field, obj) + if res == graphql.Null { + invalid = true + } + return res + }) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var userOverrideConnectionImplementors = []string{"UserOverrideConnection"} + +func (ec *executionContext) _UserOverrideConnection(ctx context.Context, sel ast.SelectionSet, obj *UserOverrideConnection) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, userOverrideConnectionImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("UserOverrideConnection") + case "nodes": + out.Values[i] = ec._UserOverrideConnection_nodes(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "pageInfo": + out.Values[i] = ec._UserOverrideConnection_pageInfo(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var __DirectiveImplementors = []string{"__Directive"} + +func (ec *executionContext) ___Directive(ctx context.Context, sel ast.SelectionSet, obj *introspection.Directive) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, __DirectiveImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("__Directive") + case "name": + out.Values[i] = ec.___Directive_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "description": + out.Values[i] = ec.___Directive_description(ctx, field, obj) + case "locations": + out.Values[i] = ec.___Directive_locations(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + 
} + case "args": + out.Values[i] = ec.___Directive_args(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var __EnumValueImplementors = []string{"__EnumValue"} + +func (ec *executionContext) ___EnumValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.EnumValue) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, __EnumValueImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("__EnumValue") + case "name": + out.Values[i] = ec.___EnumValue_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "description": + out.Values[i] = ec.___EnumValue_description(ctx, field, obj) + case "isDeprecated": + out.Values[i] = ec.___EnumValue_isDeprecated(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "deprecationReason": + out.Values[i] = ec.___EnumValue_deprecationReason(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var __FieldImplementors = []string{"__Field"} + +func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, __FieldImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("__Field") + case "name": + out.Values[i] = ec.___Field_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "description": + out.Values[i] = ec.___Field_description(ctx, field, obj) + case "args": + out.Values[i] = 
ec.___Field_args(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "type": + out.Values[i] = ec.___Field_type(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "isDeprecated": + out.Values[i] = ec.___Field_isDeprecated(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "deprecationReason": + out.Values[i] = ec.___Field_deprecationReason(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var __InputValueImplementors = []string{"__InputValue"} + +func (ec *executionContext) ___InputValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.InputValue) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, __InputValueImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("__InputValue") + case "name": + out.Values[i] = ec.___InputValue_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "description": + out.Values[i] = ec.___InputValue_description(ctx, field, obj) + case "type": + out.Values[i] = ec.___InputValue_type(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "defaultValue": + out.Values[i] = ec.___InputValue_defaultValue(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var __SchemaImplementors = []string{"__Schema"} + +func (ec *executionContext) ___Schema(ctx context.Context, sel ast.SelectionSet, obj *introspection.Schema) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, __SchemaImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + 
case "__typename": + out.Values[i] = graphql.MarshalString("__Schema") + case "types": + out.Values[i] = ec.___Schema_types(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "queryType": + out.Values[i] = ec.___Schema_queryType(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "mutationType": + out.Values[i] = ec.___Schema_mutationType(ctx, field, obj) + case "subscriptionType": + out.Values[i] = ec.___Schema_subscriptionType(ctx, field, obj) + case "directives": + out.Values[i] = ec.___Schema_directives(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +var __TypeImplementors = []string{"__Type"} + +func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, obj *introspection.Type) graphql.Marshaler { + fields := graphql.CollectFields(ctx, sel, __TypeImplementors) + + out := graphql.NewFieldSet(fields) + invalid := false + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("__Type") + case "kind": + out.Values[i] = ec.___Type_kind(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalid = true + } + case "name": + out.Values[i] = ec.___Type_name(ctx, field, obj) + case "description": + out.Values[i] = ec.___Type_description(ctx, field, obj) + case "fields": + out.Values[i] = ec.___Type_fields(ctx, field, obj) + case "interfaces": + out.Values[i] = ec.___Type_interfaces(ctx, field, obj) + case "possibleTypes": + out.Values[i] = ec.___Type_possibleTypes(ctx, field, obj) + case "enumValues": + out.Values[i] = ec.___Type_enumValues(ctx, field, obj) + case "inputFields": + out.Values[i] = ec.___Type_inputFields(ctx, field, obj) + case "ofType": + out.Values[i] = ec.___Type_ofType(ctx, field, obj) + default: + panic("unknown field " + 
strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalid { + return graphql.Null + } + return out +} + +// endregion **************************** object.gotpl **************************** + +// region ***************************** type.gotpl ***************************** + +func (ec *executionContext) marshalNAlert2githubᚗcomᚋtargetᚋgoalertᚋalertᚐAlert(ctx context.Context, sel ast.SelectionSet, v alert.Alert) graphql.Marshaler { + return ec._Alert(ctx, sel, &v) +} + +func (ec *executionContext) marshalNAlert2ᚕgithub.comᚋtargetᚋgoalertᚋalertᚐAlert(ctx context.Context, sel ast.SelectionSet, v []alert.Alert) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNAlert2githubᚗcomᚋtargetᚋgoalertᚋalertᚐAlert(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNAlertConnection2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐAlertConnection(ctx context.Context, sel ast.SelectionSet, v AlertConnection) graphql.Marshaler { + return ec._AlertConnection(ctx, sel, &v) +} + +func (ec *executionContext) marshalNAlertConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐAlertConnection(ctx context.Context, sel ast.SelectionSet, v *AlertConnection) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._AlertConnection(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNAlertStatus2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐAlertStatus(ctx context.Context, v interface{}) 
(AlertStatus, error) { + var res AlertStatus + return res, res.UnmarshalGQL(v) +} + +func (ec *executionContext) marshalNAlertStatus2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐAlertStatus(ctx context.Context, sel ast.SelectionSet, v AlertStatus) graphql.Marshaler { + return v +} + +func (ec *executionContext) marshalNAuthSubject2githubᚗcomᚋtargetᚋgoalertᚋuserᚐAuthSubject(ctx context.Context, sel ast.SelectionSet, v user.AuthSubject) graphql.Marshaler { + return ec._AuthSubject(ctx, sel, &v) +} + +func (ec *executionContext) marshalNAuthSubject2ᚕgithub.comᚋtargetᚋgoalertᚋuserᚐAuthSubject(ctx context.Context, sel ast.SelectionSet, v []user.AuthSubject) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNAuthSubject2githubᚗcomᚋtargetᚋgoalertᚋuserᚐAuthSubject(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNAuthSubjectConnection2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐAuthSubjectConnection(ctx context.Context, sel ast.SelectionSet, v AuthSubjectConnection) graphql.Marshaler { + return ec._AuthSubjectConnection(ctx, sel, &v) +} + +func (ec *executionContext) marshalNAuthSubjectConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐAuthSubjectConnection(ctx context.Context, sel ast.SelectionSet, v *AuthSubjectConnection) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._AuthSubjectConnection(ctx, sel, v) +} + +func (ec *executionContext) 
unmarshalNAuthSubjectInput2githubᚗcomᚋtargetᚋgoalertᚋuserᚐAuthSubject(ctx context.Context, v interface{}) (user.AuthSubject, error) { + return ec.unmarshalInputAuthSubjectInput(ctx, v) +} + +func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v interface{}) (bool, error) { + return graphql.UnmarshalBoolean(v) +} + +func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler { + return graphql.MarshalBoolean(v) +} + +func (ec *executionContext) unmarshalNBoolean2ᚕbool(ctx context.Context, v interface{}) ([]bool, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]bool, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNBoolean2bool(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalNBoolean2ᚕbool(ctx context.Context, sel ast.SelectionSet, v []bool) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNBoolean2bool(ctx, sel, v[i]) + } + + return ret +} + +func (ec *executionContext) unmarshalNClockTime2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx context.Context, v interface{}) (rule.Clock, error) { + return UnmarshalClockTime(v) +} + +func (ec *executionContext) marshalNClockTime2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx context.Context, sel ast.SelectionSet, v rule.Clock) graphql.Marshaler { + return MarshalClockTime(v) +} + +func (ec *executionContext) unmarshalNConfigType2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐConfigType(ctx context.Context, v interface{}) (ConfigType, error) { + var res ConfigType + return res, res.UnmarshalGQL(v) +} + +func (ec *executionContext) marshalNConfigType2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐConfigType(ctx context.Context, sel ast.SelectionSet, v ConfigType) graphql.Marshaler { + return v 
+} + +func (ec *executionContext) marshalNConfigValue2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐConfigValue(ctx context.Context, sel ast.SelectionSet, v ConfigValue) graphql.Marshaler { + return ec._ConfigValue(ctx, sel, &v) +} + +func (ec *executionContext) marshalNConfigValue2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐConfigValue(ctx context.Context, sel ast.SelectionSet, v []ConfigValue) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNConfigValue2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐConfigValue(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) unmarshalNConfigValueInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐConfigValueInput(ctx context.Context, v interface{}) (ConfigValueInput, error) { + return ec.unmarshalInputConfigValueInput(ctx, v) +} + +func (ec *executionContext) unmarshalNContactMethodType2githubᚗcomᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐType(ctx context.Context, v interface{}) (contactmethod.Type, error) { + return UnmarshalContactMethodType(v) +} + +func (ec *executionContext) marshalNContactMethodType2githubᚗcomᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐType(ctx context.Context, sel ast.SelectionSet, v contactmethod.Type) graphql.Marshaler { + return MarshalContactMethodType(v) +} + +func (ec *executionContext) unmarshalNCreateEscalationPolicyInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateEscalationPolicyInput(ctx context.Context, v interface{}) (CreateEscalationPolicyInput, error) { + return ec.unmarshalInputCreateEscalationPolicyInput(ctx, v) +} + +func (ec *executionContext) 
unmarshalNCreateEscalationPolicyStepInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateEscalationPolicyStepInput(ctx context.Context, v interface{}) (CreateEscalationPolicyStepInput, error) { + return ec.unmarshalInputCreateEscalationPolicyStepInput(ctx, v) +} + +func (ec *executionContext) unmarshalNCreateIntegrationKeyInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateIntegrationKeyInput(ctx context.Context, v interface{}) (CreateIntegrationKeyInput, error) { + return ec.unmarshalInputCreateIntegrationKeyInput(ctx, v) +} + +func (ec *executionContext) unmarshalNCreateRotationInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateRotationInput(ctx context.Context, v interface{}) (CreateRotationInput, error) { + return ec.unmarshalInputCreateRotationInput(ctx, v) +} + +func (ec *executionContext) unmarshalNCreateScheduleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateScheduleInput(ctx context.Context, v interface{}) (CreateScheduleInput, error) { + return ec.unmarshalInputCreateScheduleInput(ctx, v) +} + +func (ec *executionContext) unmarshalNCreateServiceInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateServiceInput(ctx context.Context, v interface{}) (CreateServiceInput, error) { + return ec.unmarshalInputCreateServiceInput(ctx, v) +} + +func (ec *executionContext) unmarshalNCreateUserContactMethodInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateUserContactMethodInput(ctx context.Context, v interface{}) (CreateUserContactMethodInput, error) { + return ec.unmarshalInputCreateUserContactMethodInput(ctx, v) +} + +func (ec *executionContext) unmarshalNCreateUserNotificationRuleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateUserNotificationRuleInput(ctx context.Context, v interface{}) (CreateUserNotificationRuleInput, error) { + return ec.unmarshalInputCreateUserNotificationRuleInput(ctx, v) +} + +func (ec *executionContext) unmarshalNCreateUserOverrideInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateUserOverrideInput(ctx context.Context, v interface{}) (CreateUserOverrideInput, error) { + 
return ec.unmarshalInputCreateUserOverrideInput(ctx, v) +} + +func (ec *executionContext) marshalNEscalationPolicy2githubᚗcomᚋtargetᚋgoalertᚋescalationᚐPolicy(ctx context.Context, sel ast.SelectionSet, v escalation.Policy) graphql.Marshaler { + return ec._EscalationPolicy(ctx, sel, &v) +} + +func (ec *executionContext) marshalNEscalationPolicy2ᚕgithub.comᚋtargetᚋgoalertᚋescalationᚐPolicy(ctx context.Context, sel ast.SelectionSet, v []escalation.Policy) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNEscalationPolicy2githubᚗcomᚋtargetᚋgoalertᚋescalationᚐPolicy(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNEscalationPolicyConnection2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐEscalationPolicyConnection(ctx context.Context, sel ast.SelectionSet, v EscalationPolicyConnection) graphql.Marshaler { + return ec._EscalationPolicyConnection(ctx, sel, &v) +} + +func (ec *executionContext) marshalNEscalationPolicyConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐEscalationPolicyConnection(ctx context.Context, sel ast.SelectionSet, v *EscalationPolicyConnection) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._EscalationPolicyConnection(ctx, sel, v) +} + +func (ec *executionContext) marshalNEscalationPolicyStep2githubᚗcomᚋtargetᚋgoalertᚋescalationᚐStep(ctx context.Context, sel ast.SelectionSet, v escalation.Step) graphql.Marshaler { + return 
ec._EscalationPolicyStep(ctx, sel, &v) +} + +func (ec *executionContext) marshalNEscalationPolicyStep2ᚕgithub.comᚋtargetᚋgoalertᚋescalationᚐStep(ctx context.Context, sel ast.SelectionSet, v []escalation.Step) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNEscalationPolicyStep2githubᚗcomᚋtargetᚋgoalertᚋescalationᚐStep(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) unmarshalNID2int(ctx context.Context, v interface{}) (int, error) { + return graphql.UnmarshalIntID(v) +} + +func (ec *executionContext) marshalNID2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler { + return graphql.MarshalIntID(v) +} + +func (ec *executionContext) unmarshalNID2string(ctx context.Context, v interface{}) (string, error) { + return graphql.UnmarshalID(v) +} + +func (ec *executionContext) marshalNID2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + return graphql.MarshalID(v) +} + +func (ec *executionContext) unmarshalNID2ᚕstring(ctx context.Context, v interface{}) ([]string, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]string, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNID2string(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalNID2ᚕstring(ctx context.Context, sel ast.SelectionSet, v []string) 
graphql.Marshaler { + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNID2string(ctx, sel, v[i]) + } + + return ret +} + +func (ec *executionContext) unmarshalNISOTimestamp2timeᚐTime(ctx context.Context, v interface{}) (time.Time, error) { + return UnmarshalISOTimestamp(v) +} + +func (ec *executionContext) marshalNISOTimestamp2timeᚐTime(ctx context.Context, sel ast.SelectionSet, v time.Time) graphql.Marshaler { + if v.IsZero() { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return MarshalISOTimestamp(v) +} + +func (ec *executionContext) unmarshalNISOTimestamp2ᚕtimeᚐTime(ctx context.Context, v interface{}) ([]time.Time, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]time.Time, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNISOTimestamp2timeᚐTime(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalNISOTimestamp2ᚕtimeᚐTime(ctx context.Context, sel ast.SelectionSet, v []time.Time) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNISOTimestamp2timeᚐTime(ctx, sel, v[i]) + } + + return ret +} + +func (ec *executionContext) unmarshalNInt2int(ctx context.Context, v interface{}) (int, error) { + return graphql.UnmarshalInt(v) +} + +func (ec *executionContext) marshalNInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler { + return graphql.MarshalInt(v) +} + +func (ec *executionContext) unmarshalNInt2ᚕint(ctx context.Context, v interface{}) ([]int, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]int, len(vSlice)) + for i := range vSlice { + 
res[i], err = ec.unmarshalNInt2int(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalNInt2ᚕint(ctx context.Context, sel ast.SelectionSet, v []int) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNInt2int(ctx, sel, v[i]) + } + + return ret +} + +func (ec *executionContext) marshalNIntegrationKey2githubᚗcomᚋtargetᚋgoalertᚋintegrationkeyᚐIntegrationKey(ctx context.Context, sel ast.SelectionSet, v integrationkey.IntegrationKey) graphql.Marshaler { + return ec._IntegrationKey(ctx, sel, &v) +} + +func (ec *executionContext) marshalNIntegrationKey2ᚕgithub.comᚋtargetᚋgoalertᚋintegrationkeyᚐIntegrationKey(ctx context.Context, sel ast.SelectionSet, v []integrationkey.IntegrationKey) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNIntegrationKey2githubᚗcomᚋtargetᚋgoalertᚋintegrationkeyᚐIntegrationKey(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) unmarshalNIntegrationKeyType2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐIntegrationKeyType(ctx context.Context, v interface{}) (IntegrationKeyType, error) { + var res IntegrationKeyType + return res, res.UnmarshalGQL(v) +} + +func (ec *executionContext) marshalNIntegrationKeyType2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐIntegrationKeyType(ctx context.Context, sel ast.SelectionSet, v IntegrationKeyType) graphql.Marshaler { + return v +} + +func (ec *executionContext) marshalNLabel2githubᚗcomᚋtargetᚋgoalertᚋlabelᚐLabel(ctx 
context.Context, sel ast.SelectionSet, v label.Label) graphql.Marshaler { + return ec._Label(ctx, sel, &v) +} + +func (ec *executionContext) marshalNLabel2ᚕgithub.comᚋtargetᚋgoalertᚋlabelᚐLabel(ctx context.Context, sel ast.SelectionSet, v []label.Label) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNLabel2githubᚗcomᚋtargetᚋgoalertᚋlabelᚐLabel(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNLabelConnection2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐLabelConnection(ctx context.Context, sel ast.SelectionSet, v LabelConnection) graphql.Marshaler { + return ec._LabelConnection(ctx, sel, &v) +} + +func (ec *executionContext) marshalNLabelConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐLabelConnection(ctx context.Context, sel ast.SelectionSet, v *LabelConnection) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._LabelConnection(ctx, sel, v) +} + +func (ec *executionContext) marshalNOnCallShift2githubᚗcomᚋtargetᚋgoalertᚋoncallᚐShift(ctx context.Context, sel ast.SelectionSet, v oncall.Shift) graphql.Marshaler { + return ec._OnCallShift(ctx, sel, &v) +} + +func (ec *executionContext) marshalNOnCallShift2ᚕgithub.comᚋtargetᚋgoalertᚋoncallᚐShift(ctx context.Context, sel ast.SelectionSet, v []oncall.Shift) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i 
:= range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNOnCallShift2githubᚗcomᚋtargetᚋgoalertᚋoncallᚐShift(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNPageInfo2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐPageInfo(ctx context.Context, sel ast.SelectionSet, v PageInfo) graphql.Marshaler { + return ec._PageInfo(ctx, sel, &v) +} + +func (ec *executionContext) marshalNRotation2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋrotationᚐRotation(ctx context.Context, sel ast.SelectionSet, v rotation.Rotation) graphql.Marshaler { + return ec._Rotation(ctx, sel, &v) +} + +func (ec *executionContext) marshalNRotation2ᚕgithub.comᚋtargetᚋgoalertᚋscheduleᚋrotationᚐRotation(ctx context.Context, sel ast.SelectionSet, v []rotation.Rotation) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNRotation2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋrotationᚐRotation(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNRotationConnection2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐRotationConnection(ctx context.Context, sel ast.SelectionSet, v RotationConnection) graphql.Marshaler { + return ec._RotationConnection(ctx, sel, &v) +} + +func (ec *executionContext) 
marshalNRotationConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐRotationConnection(ctx context.Context, sel ast.SelectionSet, v *RotationConnection) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._RotationConnection(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNRotationType2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋrotationᚐType(ctx context.Context, v interface{}) (rotation.Type, error) { + var res rotation.Type + return res, res.UnmarshalGQL(v) +} + +func (ec *executionContext) marshalNRotationType2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋrotationᚐType(ctx context.Context, sel ast.SelectionSet, v rotation.Type) graphql.Marshaler { + return v +} + +func (ec *executionContext) marshalNSchedule2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚐSchedule(ctx context.Context, sel ast.SelectionSet, v schedule.Schedule) graphql.Marshaler { + return ec._Schedule(ctx, sel, &v) +} + +func (ec *executionContext) marshalNSchedule2ᚕgithub.comᚋtargetᚋgoalertᚋscheduleᚐSchedule(ctx context.Context, sel ast.SelectionSet, v []schedule.Schedule) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNSchedule2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚐSchedule(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNScheduleConnection2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐScheduleConnection(ctx context.Context, sel ast.SelectionSet, v ScheduleConnection) graphql.Marshaler { + return 
ec._ScheduleConnection(ctx, sel, &v) +} + +func (ec *executionContext) marshalNScheduleConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleConnection(ctx context.Context, sel ast.SelectionSet, v *ScheduleConnection) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._ScheduleConnection(ctx, sel, v) +} + +func (ec *executionContext) marshalNScheduleRule2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋruleᚐRule(ctx context.Context, sel ast.SelectionSet, v rule.Rule) graphql.Marshaler { + return ec._ScheduleRule(ctx, sel, &v) +} + +func (ec *executionContext) marshalNScheduleRule2ᚕgithub.comᚋtargetᚋgoalertᚋscheduleᚋruleᚐRule(ctx context.Context, sel ast.SelectionSet, v []rule.Rule) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNScheduleRule2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋruleᚐRule(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) unmarshalNScheduleRuleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐScheduleRuleInput(ctx context.Context, v interface{}) (ScheduleRuleInput, error) { + return ec.unmarshalInputScheduleRuleInput(ctx, v) +} + +func (ec *executionContext) unmarshalNScheduleRuleInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleRuleInput(ctx context.Context, v interface{}) ([]ScheduleRuleInput, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + 
var err error + res := make([]ScheduleRuleInput, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNScheduleRuleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐScheduleRuleInput(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalNScheduleTarget2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTarget(ctx context.Context, sel ast.SelectionSet, v ScheduleTarget) graphql.Marshaler { + return ec._ScheduleTarget(ctx, sel, &v) +} + +func (ec *executionContext) marshalNScheduleTarget2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTarget(ctx context.Context, sel ast.SelectionSet, v []ScheduleTarget) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNScheduleTarget2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTarget(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) unmarshalNScheduleTargetInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTargetInput(ctx context.Context, v interface{}) (ScheduleTargetInput, error) { + return ec.unmarshalInputScheduleTargetInput(ctx, v) +} + +func (ec *executionContext) marshalNService2githubᚗcomᚋtargetᚋgoalertᚋserviceᚐService(ctx context.Context, sel ast.SelectionSet, v service.Service) graphql.Marshaler { + return ec._Service(ctx, sel, &v) +} + +func (ec *executionContext) marshalNService2ᚕgithub.comᚋtargetᚋgoalertᚋserviceᚐService(ctx context.Context, sel ast.SelectionSet, v []service.Service) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + 
isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNService2githubᚗcomᚋtargetᚋgoalertᚋserviceᚐService(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNServiceConnection2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐServiceConnection(ctx context.Context, sel ast.SelectionSet, v ServiceConnection) graphql.Marshaler { + return ec._ServiceConnection(ctx, sel, &v) +} + +func (ec *executionContext) marshalNServiceConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐServiceConnection(ctx context.Context, sel ast.SelectionSet, v *ServiceConnection) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._ServiceConnection(ctx, sel, v) +} + +func (ec *executionContext) marshalNServiceOnCallUser2githubᚗcomᚋtargetᚋgoalertᚋoncallᚐServiceOnCallUser(ctx context.Context, sel ast.SelectionSet, v oncall.ServiceOnCallUser) graphql.Marshaler { + return ec._ServiceOnCallUser(ctx, sel, &v) +} + +func (ec *executionContext) marshalNServiceOnCallUser2ᚕgithub.comᚋtargetᚋgoalertᚋoncallᚐServiceOnCallUser(ctx context.Context, sel ast.SelectionSet, v []oncall.ServiceOnCallUser) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret 
= nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNServiceOnCallUser2githubᚗcomᚋtargetᚋgoalertᚋoncallᚐServiceOnCallUser(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) unmarshalNSetFavoriteInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐSetFavoriteInput(ctx context.Context, v interface{}) (SetFavoriteInput, error) { + return ec.unmarshalInputSetFavoriteInput(ctx, v) +} + +func (ec *executionContext) unmarshalNSetLabelInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐSetLabelInput(ctx context.Context, v interface{}) (SetLabelInput, error) { + return ec.unmarshalInputSetLabelInput(ctx, v) +} + +func (ec *executionContext) marshalNSlackChannel2githubᚗcomᚋtargetᚋgoalertᚋnotificationᚋslackᚐChannel(ctx context.Context, sel ast.SelectionSet, v slack.Channel) graphql.Marshaler { + return ec._SlackChannel(ctx, sel, &v) +} + +func (ec *executionContext) marshalNSlackChannel2ᚕgithub.comᚋtargetᚋgoalertᚋnotificationᚋslackᚐChannel(ctx context.Context, sel ast.SelectionSet, v []slack.Channel) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNSlackChannel2githubᚗcomᚋtargetᚋgoalertᚋnotificationᚋslackᚐChannel(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNSlackChannelConnection2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐSlackChannelConnection(ctx context.Context, sel ast.SelectionSet, v SlackChannelConnection) graphql.Marshaler { + return ec._SlackChannelConnection(ctx, sel, &v) +} + +func 
(ec *executionContext) marshalNSlackChannelConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐSlackChannelConnection(ctx context.Context, sel ast.SelectionSet, v *SlackChannelConnection) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._SlackChannelConnection(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNString2string(ctx context.Context, v interface{}) (string, error) { + return graphql.UnmarshalString(v) +} + +func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + return graphql.MarshalString(v) +} + +func (ec *executionContext) marshalNTarget2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx context.Context, sel ast.SelectionSet, v assignment.RawTarget) graphql.Marshaler { + return ec._Target(ctx, sel, &v) +} + +func (ec *executionContext) marshalNTarget2ᚕgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx context.Context, sel ast.SelectionSet, v []assignment.RawTarget) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNTarget2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNTarget2ᚖgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx context.Context, sel ast.SelectionSet, v *assignment.RawTarget) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not 
be null") + } + return graphql.Null + } + return ec._Target(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNTargetInput2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx context.Context, v interface{}) (assignment.RawTarget, error) { + return ec.unmarshalInputTargetInput(ctx, v) +} + +func (ec *executionContext) unmarshalNTargetType2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐTargetType(ctx context.Context, v interface{}) (assignment.TargetType, error) { + var res assignment.TargetType + return res, res.UnmarshalGQL(v) +} + +func (ec *executionContext) marshalNTargetType2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐTargetType(ctx context.Context, sel ast.SelectionSet, v assignment.TargetType) graphql.Marshaler { + return v +} + +func (ec *executionContext) marshalNTimeZone2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐTimeZone(ctx context.Context, sel ast.SelectionSet, v TimeZone) graphql.Marshaler { + return ec._TimeZone(ctx, sel, &v) +} + +func (ec *executionContext) marshalNTimeZone2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐTimeZone(ctx context.Context, sel ast.SelectionSet, v []TimeZone) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNTimeZone2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐTimeZone(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNTimeZoneConnection2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐTimeZoneConnection(ctx context.Context, sel ast.SelectionSet, v TimeZoneConnection) graphql.Marshaler { + return ec._TimeZoneConnection(ctx, sel, &v) +} + +func (ec *executionContext) 
marshalNTimeZoneConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐTimeZoneConnection(ctx context.Context, sel ast.SelectionSet, v *TimeZoneConnection) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._TimeZoneConnection(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNUpdateAlertsInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateAlertsInput(ctx context.Context, v interface{}) (UpdateAlertsInput, error) { + return ec.unmarshalInputUpdateAlertsInput(ctx, v) +} + +func (ec *executionContext) unmarshalNUpdateEscalationPolicyInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateEscalationPolicyInput(ctx context.Context, v interface{}) (UpdateEscalationPolicyInput, error) { + return ec.unmarshalInputUpdateEscalationPolicyInput(ctx, v) +} + +func (ec *executionContext) unmarshalNUpdateEscalationPolicyStepInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateEscalationPolicyStepInput(ctx context.Context, v interface{}) (UpdateEscalationPolicyStepInput, error) { + return ec.unmarshalInputUpdateEscalationPolicyStepInput(ctx, v) +} + +func (ec *executionContext) unmarshalNUpdateRotationInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateRotationInput(ctx context.Context, v interface{}) (UpdateRotationInput, error) { + return ec.unmarshalInputUpdateRotationInput(ctx, v) +} + +func (ec *executionContext) unmarshalNUpdateScheduleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateScheduleInput(ctx context.Context, v interface{}) (UpdateScheduleInput, error) { + return ec.unmarshalInputUpdateScheduleInput(ctx, v) +} + +func (ec *executionContext) unmarshalNUpdateServiceInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateServiceInput(ctx context.Context, v interface{}) (UpdateServiceInput, error) { + return ec.unmarshalInputUpdateServiceInput(ctx, v) +} + +func (ec *executionContext) 
unmarshalNUpdateUserContactMethodInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateUserContactMethodInput(ctx context.Context, v interface{}) (UpdateUserContactMethodInput, error) { + return ec.unmarshalInputUpdateUserContactMethodInput(ctx, v) +} + +func (ec *executionContext) unmarshalNUpdateUserInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateUserInput(ctx context.Context, v interface{}) (UpdateUserInput, error) { + return ec.unmarshalInputUpdateUserInput(ctx, v) +} + +func (ec *executionContext) unmarshalNUpdateUserOverrideInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUpdateUserOverrideInput(ctx context.Context, v interface{}) (UpdateUserOverrideInput, error) { + return ec.unmarshalInputUpdateUserOverrideInput(ctx, v) +} + +func (ec *executionContext) marshalNUser2githubᚗcomᚋtargetᚋgoalertᚋuserᚐUser(ctx context.Context, sel ast.SelectionSet, v user.User) graphql.Marshaler { + return ec._User(ctx, sel, &v) +} + +func (ec *executionContext) marshalNUser2ᚕgithub.comᚋtargetᚋgoalertᚋuserᚐUser(ctx context.Context, sel ast.SelectionSet, v []user.User) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNUser2githubᚗcomᚋtargetᚋgoalertᚋuserᚐUser(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNUserConnection2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserConnection(ctx context.Context, sel ast.SelectionSet, v UserConnection) graphql.Marshaler { + return ec._UserConnection(ctx, sel, &v) +} + +func (ec *executionContext) 
marshalNUserConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐUserConnection(ctx context.Context, sel ast.SelectionSet, v *UserConnection) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._UserConnection(ctx, sel, v) +} + +func (ec *executionContext) marshalNUserContactMethod2githubᚗcomᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐContactMethod(ctx context.Context, sel ast.SelectionSet, v contactmethod.ContactMethod) graphql.Marshaler { + return ec._UserContactMethod(ctx, sel, &v) +} + +func (ec *executionContext) marshalNUserContactMethod2ᚕgithub.comᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐContactMethod(ctx context.Context, sel ast.SelectionSet, v []contactmethod.ContactMethod) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNUserContactMethod2githubᚗcomᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐContactMethod(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNUserNotificationRule2githubᚗcomᚋtargetᚋgoalertᚋuserᚋnotificationruleᚐNotificationRule(ctx context.Context, sel ast.SelectionSet, v notificationrule.NotificationRule) graphql.Marshaler { + return ec._UserNotificationRule(ctx, sel, &v) +} + +func (ec *executionContext) marshalNUserNotificationRule2ᚕgithub.comᚋtargetᚋgoalertᚋuserᚋnotificationruleᚐNotificationRule(ctx context.Context, sel ast.SelectionSet, v []notificationrule.NotificationRule) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg 
sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNUserNotificationRule2githubᚗcomᚋtargetᚋgoalertᚋuserᚋnotificationruleᚐNotificationRule(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNUserOverride2githubᚗcomᚋtargetᚋgoalertᚋoverrideᚐUserOverride(ctx context.Context, sel ast.SelectionSet, v override.UserOverride) graphql.Marshaler { + return ec._UserOverride(ctx, sel, &v) +} + +func (ec *executionContext) marshalNUserOverride2ᚕgithub.comᚋtargetᚋgoalertᚋoverrideᚐUserOverride(ctx context.Context, sel ast.SelectionSet, v []override.UserOverride) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNUserOverride2githubᚗcomᚋtargetᚋgoalertᚋoverrideᚐUserOverride(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNUserOverrideConnection2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserOverrideConnection(ctx context.Context, sel ast.SelectionSet, v UserOverrideConnection) graphql.Marshaler { + return ec._UserOverrideConnection(ctx, sel, &v) +} + +func (ec *executionContext) 
marshalNUserOverrideConnection2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐUserOverrideConnection(ctx context.Context, sel ast.SelectionSet, v *UserOverrideConnection) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._UserOverrideConnection(ctx, sel, v) +} + +func (ec *executionContext) unmarshalNUserRole2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserRole(ctx context.Context, v interface{}) (UserRole, error) { + var res UserRole + return res, res.UnmarshalGQL(v) +} + +func (ec *executionContext) marshalNUserRole2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserRole(ctx context.Context, sel ast.SelectionSet, v UserRole) graphql.Marshaler { + return v +} + +func (ec *executionContext) marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx context.Context, sel ast.SelectionSet, v introspection.Directive) graphql.Marshaler { + return ec.___Directive(ctx, sel, &v) +} + +func (ec *executionContext) marshalN__Directive2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx context.Context, sel ast.SelectionSet, v []introspection.Directive) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) unmarshalN__DirectiveLocation2string(ctx context.Context, v interface{}) (string, error) { + return graphql.UnmarshalString(v) +} + 
+func (ec *executionContext) marshalN__DirectiveLocation2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + return graphql.MarshalString(v) +} + +func (ec *executionContext) unmarshalN__DirectiveLocation2ᚕstring(ctx context.Context, v interface{}) ([]string, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]string, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalN__DirectiveLocation2string(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalN__DirectiveLocation2ᚕstring(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__DirectiveLocation2string(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx context.Context, sel ast.SelectionSet, v introspection.EnumValue) graphql.Marshaler { + return ec.___EnumValue(ctx, sel, &v) +} + +func (ec *executionContext) marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx context.Context, sel ast.SelectionSet, v introspection.Field) graphql.Marshaler { + return ec.___Field(ctx, sel, &v) +} + +func (ec *executionContext) marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx context.Context, sel 
ast.SelectionSet, v introspection.InputValue) graphql.Marshaler { + return ec.___InputValue(ctx, sel, &v) +} + +func (ec *executionContext) marshalN__InputValue2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v introspection.Type) graphql.Marshaler { + return ec.___Type(ctx, sel, &v) +} + +func (ec *executionContext) marshalN__Type2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec 
*executionContext) marshalN__Type2ᚖgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler { + if v == nil { + if !ec.HasError(graphql.GetResolverContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec.___Type(ctx, sel, v) +} + +func (ec *executionContext) unmarshalN__TypeKind2string(ctx context.Context, v interface{}) (string, error) { + return graphql.UnmarshalString(v) +} + +func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + return graphql.MarshalString(v) +} + +func (ec *executionContext) marshalOAlert2githubᚗcomᚋtargetᚋgoalertᚋalertᚐAlert(ctx context.Context, sel ast.SelectionSet, v alert.Alert) graphql.Marshaler { + return ec._Alert(ctx, sel, &v) +} + +func (ec *executionContext) marshalOAlert2ᚕgithub.comᚋtargetᚋgoalertᚋalertᚐAlert(ctx context.Context, sel ast.SelectionSet, v []alert.Alert) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNAlert2githubᚗcomᚋtargetᚋgoalertᚋalertᚐAlert(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalOAlert2ᚖgithub.comᚋtargetᚋgoalertᚋalertᚐAlert(ctx context.Context, sel ast.SelectionSet, v *alert.Alert) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._Alert(ctx, sel, v) +} + +func (ec *executionContext) 
unmarshalOAlertSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐAlertSearchOptions(ctx context.Context, v interface{}) (AlertSearchOptions, error) { + return ec.unmarshalInputAlertSearchOptions(ctx, v) +} + +func (ec *executionContext) unmarshalOAlertSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐAlertSearchOptions(ctx context.Context, v interface{}) (*AlertSearchOptions, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOAlertSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐAlertSearchOptions(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalOAlertState2githubᚗcomᚋtargetᚋgoalertᚋalertᚐState(ctx context.Context, sel ast.SelectionSet, v alert.State) graphql.Marshaler { + return ec._AlertState(ctx, sel, &v) +} + +func (ec *executionContext) marshalOAlertState2ᚖgithub.comᚋtargetᚋgoalertᚋalertᚐState(ctx context.Context, sel ast.SelectionSet, v *alert.State) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._AlertState(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOAlertStatus2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐAlertStatus(ctx context.Context, v interface{}) ([]AlertStatus, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]AlertStatus, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNAlertStatus2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐAlertStatus(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOAlertStatus2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐAlertStatus(ctx context.Context, sel ast.SelectionSet, v []AlertStatus) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + 
Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNAlertStatus2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐAlertStatus(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) unmarshalOBoolean2bool(ctx context.Context, v interface{}) (bool, error) { + return graphql.UnmarshalBoolean(v) +} + +func (ec *executionContext) marshalOBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler { + return graphql.MarshalBoolean(v) +} + +func (ec *executionContext) unmarshalOBoolean2ᚕbool(ctx context.Context, v interface{}) ([]bool, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]bool, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNBoolean2bool(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOBoolean2ᚕbool(ctx context.Context, sel ast.SelectionSet, v []bool) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNBoolean2bool(ctx, sel, v[i]) + } + + return ret +} + +func (ec *executionContext) unmarshalOBoolean2ᚖbool(ctx context.Context, v interface{}) (*bool, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOBoolean2bool(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalOBoolean2ᚖbool(ctx context.Context, sel ast.SelectionSet, v *bool) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec.marshalOBoolean2bool(ctx, sel, *v) +} + +func (ec *executionContext) 
unmarshalOClockTime2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx context.Context, v interface{}) (rule.Clock, error) { + return UnmarshalClockTime(v) +} + +func (ec *executionContext) marshalOClockTime2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx context.Context, sel ast.SelectionSet, v rule.Clock) graphql.Marshaler { + return MarshalClockTime(v) +} + +func (ec *executionContext) unmarshalOClockTime2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx context.Context, v interface{}) (*rule.Clock, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOClockTime2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalOClockTime2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx context.Context, sel ast.SelectionSet, v *rule.Clock) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec.marshalOClockTime2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋruleᚐClock(ctx, sel, *v) +} + +func (ec *executionContext) unmarshalOConfigValueInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐConfigValueInput(ctx context.Context, v interface{}) ([]ConfigValueInput, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]ConfigValueInput, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNConfigValueInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐConfigValueInput(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) unmarshalOContactMethodType2githubᚗcomᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐType(ctx context.Context, v interface{}) (contactmethod.Type, error) { + return UnmarshalContactMethodType(v) +} + +func (ec *executionContext) marshalOContactMethodType2githubᚗcomᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐType(ctx context.Context, sel ast.SelectionSet, v contactmethod.Type) graphql.Marshaler { + return 
MarshalContactMethodType(v) +} + +func (ec *executionContext) unmarshalOCreateEscalationPolicyInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateEscalationPolicyInput(ctx context.Context, v interface{}) (CreateEscalationPolicyInput, error) { + return ec.unmarshalInputCreateEscalationPolicyInput(ctx, v) +} + +func (ec *executionContext) unmarshalOCreateEscalationPolicyInput2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateEscalationPolicyInput(ctx context.Context, v interface{}) (*CreateEscalationPolicyInput, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOCreateEscalationPolicyInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateEscalationPolicyInput(ctx, v) + return &res, err +} + +func (ec *executionContext) unmarshalOCreateEscalationPolicyStepInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateEscalationPolicyStepInput(ctx context.Context, v interface{}) ([]CreateEscalationPolicyStepInput, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]CreateEscalationPolicyStepInput, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNCreateEscalationPolicyStepInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateEscalationPolicyStepInput(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) unmarshalOCreateIntegrationKeyInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateIntegrationKeyInput(ctx context.Context, v interface{}) ([]CreateIntegrationKeyInput, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]CreateIntegrationKeyInput, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNCreateIntegrationKeyInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateIntegrationKeyInput(ctx, vSlice[i]) + if err != nil { + return 
nil, err + } + } + return res, nil +} + +func (ec *executionContext) unmarshalOCreateRotationInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateRotationInput(ctx context.Context, v interface{}) (CreateRotationInput, error) { + return ec.unmarshalInputCreateRotationInput(ctx, v) +} + +func (ec *executionContext) unmarshalOCreateRotationInput2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateRotationInput(ctx context.Context, v interface{}) (*CreateRotationInput, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOCreateRotationInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateRotationInput(ctx, v) + return &res, err +} + +func (ec *executionContext) unmarshalOCreateScheduleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateScheduleInput(ctx context.Context, v interface{}) (CreateScheduleInput, error) { + return ec.unmarshalInputCreateScheduleInput(ctx, v) +} + +func (ec *executionContext) unmarshalOCreateScheduleInput2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateScheduleInput(ctx context.Context, v interface{}) (*CreateScheduleInput, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOCreateScheduleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateScheduleInput(ctx, v) + return &res, err +} + +func (ec *executionContext) unmarshalOCreateUserNotificationRuleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateUserNotificationRuleInput(ctx context.Context, v interface{}) (CreateUserNotificationRuleInput, error) { + return ec.unmarshalInputCreateUserNotificationRuleInput(ctx, v) +} + +func (ec *executionContext) unmarshalOCreateUserNotificationRuleInput2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐCreateUserNotificationRuleInput(ctx context.Context, v interface{}) (*CreateUserNotificationRuleInput, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOCreateUserNotificationRuleInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐCreateUserNotificationRuleInput(ctx, v) + return &res, err +} + +func (ec *executionContext) 
marshalOEscalationPolicy2githubᚗcomᚋtargetᚋgoalertᚋescalationᚐPolicy(ctx context.Context, sel ast.SelectionSet, v escalation.Policy) graphql.Marshaler { + return ec._EscalationPolicy(ctx, sel, &v) +} + +func (ec *executionContext) marshalOEscalationPolicy2ᚖgithub.comᚋtargetᚋgoalertᚋescalationᚐPolicy(ctx context.Context, sel ast.SelectionSet, v *escalation.Policy) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._EscalationPolicy(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOEscalationPolicySearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐEscalationPolicySearchOptions(ctx context.Context, v interface{}) (EscalationPolicySearchOptions, error) { + return ec.unmarshalInputEscalationPolicySearchOptions(ctx, v) +} + +func (ec *executionContext) unmarshalOEscalationPolicySearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐEscalationPolicySearchOptions(ctx context.Context, v interface{}) (*EscalationPolicySearchOptions, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOEscalationPolicySearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐEscalationPolicySearchOptions(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalOEscalationPolicyStep2githubᚗcomᚋtargetᚋgoalertᚋescalationᚐStep(ctx context.Context, sel ast.SelectionSet, v escalation.Step) graphql.Marshaler { + return ec._EscalationPolicyStep(ctx, sel, &v) +} + +func (ec *executionContext) marshalOEscalationPolicyStep2ᚖgithub.comᚋtargetᚋgoalertᚋescalationᚐStep(ctx context.Context, sel ast.SelectionSet, v *escalation.Step) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._EscalationPolicyStep(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOID2string(ctx context.Context, v interface{}) (string, error) { + return graphql.UnmarshalID(v) +} + +func (ec *executionContext) marshalOID2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler { + return graphql.MarshalID(v) +} + +func (ec 
*executionContext) unmarshalOID2ᚕstring(ctx context.Context, v interface{}) ([]string, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]string, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNID2string(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOID2ᚕstring(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNID2string(ctx, sel, v[i]) + } + + return ret +} + +func (ec *executionContext) unmarshalOID2ᚖstring(ctx context.Context, v interface{}) (*string, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOID2string(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalOID2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec.marshalOID2string(ctx, sel, *v) +} + +func (ec *executionContext) unmarshalOISOTimestamp2timeᚐTime(ctx context.Context, v interface{}) (time.Time, error) { + return UnmarshalISOTimestamp(v) +} + +func (ec *executionContext) marshalOISOTimestamp2timeᚐTime(ctx context.Context, sel ast.SelectionSet, v time.Time) graphql.Marshaler { + if v.IsZero() { + return graphql.Null + } + return MarshalISOTimestamp(v) +} + +func (ec *executionContext) unmarshalOISOTimestamp2ᚖtimeᚐTime(ctx context.Context, v interface{}) (*time.Time, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOISOTimestamp2timeᚐTime(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalOISOTimestamp2ᚖtimeᚐTime(ctx context.Context, sel ast.SelectionSet, v *time.Time) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return 
ec.marshalOISOTimestamp2timeᚐTime(ctx, sel, *v) +} + +func (ec *executionContext) unmarshalOInt2int(ctx context.Context, v interface{}) (int, error) { + return graphql.UnmarshalInt(v) +} + +func (ec *executionContext) marshalOInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler { + return graphql.MarshalInt(v) +} + +func (ec *executionContext) unmarshalOInt2ᚕint(ctx context.Context, v interface{}) ([]int, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]int, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNInt2int(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOInt2ᚕint(ctx context.Context, sel ast.SelectionSet, v []int) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNInt2int(ctx, sel, v[i]) + } + + return ret +} + +func (ec *executionContext) unmarshalOInt2ᚖint(ctx context.Context, v interface{}) (*int, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOInt2int(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalOInt2ᚖint(ctx context.Context, sel ast.SelectionSet, v *int) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec.marshalOInt2int(ctx, sel, *v) +} + +func (ec *executionContext) marshalOIntegrationKey2githubᚗcomᚋtargetᚋgoalertᚋintegrationkeyᚐIntegrationKey(ctx context.Context, sel ast.SelectionSet, v integrationkey.IntegrationKey) graphql.Marshaler { + return ec._IntegrationKey(ctx, sel, &v) +} + +func (ec *executionContext) marshalOIntegrationKey2ᚖgithub.comᚋtargetᚋgoalertᚋintegrationkeyᚐIntegrationKey(ctx context.Context, sel ast.SelectionSet, v *integrationkey.IntegrationKey) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return 
ec._IntegrationKey(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOLabelSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐLabelSearchOptions(ctx context.Context, v interface{}) (LabelSearchOptions, error) { + return ec.unmarshalInputLabelSearchOptions(ctx, v) +} + +func (ec *executionContext) unmarshalOLabelSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐLabelSearchOptions(ctx context.Context, v interface{}) (*LabelSearchOptions, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOLabelSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐLabelSearchOptions(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalORotation2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋrotationᚐRotation(ctx context.Context, sel ast.SelectionSet, v rotation.Rotation) graphql.Marshaler { + return ec._Rotation(ctx, sel, &v) +} + +func (ec *executionContext) marshalORotation2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚋrotationᚐRotation(ctx context.Context, sel ast.SelectionSet, v *rotation.Rotation) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._Rotation(ctx, sel, v) +} + +func (ec *executionContext) unmarshalORotationSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐRotationSearchOptions(ctx context.Context, v interface{}) (RotationSearchOptions, error) { + return ec.unmarshalInputRotationSearchOptions(ctx, v) +} + +func (ec *executionContext) unmarshalORotationSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐRotationSearchOptions(ctx context.Context, v interface{}) (*RotationSearchOptions, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalORotationSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐRotationSearchOptions(ctx, v) + return &res, err +} + +func (ec *executionContext) unmarshalORotationType2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋrotationᚐType(ctx context.Context, v interface{}) (rotation.Type, error) { + var res rotation.Type + return res, res.UnmarshalGQL(v) +} + +func (ec *executionContext) 
marshalORotationType2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋrotationᚐType(ctx context.Context, sel ast.SelectionSet, v rotation.Type) graphql.Marshaler { + return v +} + +func (ec *executionContext) unmarshalORotationType2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚋrotationᚐType(ctx context.Context, v interface{}) (*rotation.Type, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalORotationType2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚋrotationᚐType(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalORotationType2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚋrotationᚐType(ctx context.Context, sel ast.SelectionSet, v *rotation.Type) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return v +} + +func (ec *executionContext) marshalOSchedule2githubᚗcomᚋtargetᚋgoalertᚋscheduleᚐSchedule(ctx context.Context, sel ast.SelectionSet, v schedule.Schedule) graphql.Marshaler { + return ec._Schedule(ctx, sel, &v) +} + +func (ec *executionContext) marshalOSchedule2ᚖgithub.comᚋtargetᚋgoalertᚋscheduleᚐSchedule(ctx context.Context, sel ast.SelectionSet, v *schedule.Schedule) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._Schedule(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOScheduleSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐScheduleSearchOptions(ctx context.Context, v interface{}) (ScheduleSearchOptions, error) { + return ec.unmarshalInputScheduleSearchOptions(ctx, v) +} + +func (ec *executionContext) unmarshalOScheduleSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleSearchOptions(ctx context.Context, v interface{}) (*ScheduleSearchOptions, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOScheduleSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐScheduleSearchOptions(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalOScheduleTarget2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTarget(ctx context.Context, sel ast.SelectionSet, v ScheduleTarget) 
graphql.Marshaler { + return ec._ScheduleTarget(ctx, sel, &v) +} + +func (ec *executionContext) marshalOScheduleTarget2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTarget(ctx context.Context, sel ast.SelectionSet, v *ScheduleTarget) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._ScheduleTarget(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOScheduleTargetInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTargetInput(ctx context.Context, v interface{}) ([]ScheduleTargetInput, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]ScheduleTargetInput, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNScheduleTargetInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐScheduleTargetInput(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOService2githubᚗcomᚋtargetᚋgoalertᚋserviceᚐService(ctx context.Context, sel ast.SelectionSet, v service.Service) graphql.Marshaler { + return ec._Service(ctx, sel, &v) +} + +func (ec *executionContext) marshalOService2ᚖgithub.comᚋtargetᚋgoalertᚋserviceᚐService(ctx context.Context, sel ast.SelectionSet, v *service.Service) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._Service(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOServiceSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐServiceSearchOptions(ctx context.Context, v interface{}) (ServiceSearchOptions, error) { + return ec.unmarshalInputServiceSearchOptions(ctx, v) +} + +func (ec *executionContext) unmarshalOServiceSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐServiceSearchOptions(ctx context.Context, v interface{}) (*ServiceSearchOptions, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOServiceSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐServiceSearchOptions(ctx, 
v) + return &res, err +} + +func (ec *executionContext) unmarshalOSetLabelInput2ᚕgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐSetLabelInput(ctx context.Context, v interface{}) ([]SetLabelInput, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]SetLabelInput, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNSetLabelInput2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐSetLabelInput(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOSlackChannel2githubᚗcomᚋtargetᚋgoalertᚋnotificationᚋslackᚐChannel(ctx context.Context, sel ast.SelectionSet, v slack.Channel) graphql.Marshaler { + return ec._SlackChannel(ctx, sel, &v) +} + +func (ec *executionContext) marshalOSlackChannel2ᚖgithub.comᚋtargetᚋgoalertᚋnotificationᚋslackᚐChannel(ctx context.Context, sel ast.SelectionSet, v *slack.Channel) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._SlackChannel(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOSlackChannelSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐSlackChannelSearchOptions(ctx context.Context, v interface{}) (SlackChannelSearchOptions, error) { + return ec.unmarshalInputSlackChannelSearchOptions(ctx, v) +} + +func (ec *executionContext) unmarshalOSlackChannelSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐSlackChannelSearchOptions(ctx context.Context, v interface{}) (*SlackChannelSearchOptions, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOSlackChannelSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐSlackChannelSearchOptions(ctx, v) + return &res, err +} + +func (ec *executionContext) unmarshalOString2string(ctx context.Context, v interface{}) (string, error) { + return graphql.UnmarshalString(v) +} + +func (ec *executionContext) marshalOString2string(ctx context.Context, sel ast.SelectionSet, 
v string) graphql.Marshaler { + return graphql.MarshalString(v) +} + +func (ec *executionContext) unmarshalOString2ᚕstring(ctx context.Context, v interface{}) ([]string, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]string, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNString2string(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOString2ᚕstring(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNString2string(ctx, sel, v[i]) + } + + return ret +} + +func (ec *executionContext) unmarshalOString2ᚖstring(ctx context.Context, v interface{}) (*string, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOString2string(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalOString2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec.marshalOString2string(ctx, sel, *v) +} + +func (ec *executionContext) unmarshalOTargetInput2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx context.Context, v interface{}) (assignment.RawTarget, error) { + return ec.unmarshalInputTargetInput(ctx, v) +} + +func (ec *executionContext) unmarshalOTargetInput2ᚕgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx context.Context, v interface{}) ([]assignment.RawTarget, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]assignment.RawTarget, len(vSlice)) + for i := range vSlice { + res[i], err = ec.unmarshalNTargetInput2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, 
vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) unmarshalOTargetInput2ᚖgithub.comᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx context.Context, v interface{}) (*assignment.RawTarget, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOTargetInput2githubᚗcomᚋtargetᚋgoalertᚋassignmentᚐRawTarget(ctx, v) + return &res, err +} + +func (ec *executionContext) unmarshalOTimeZoneSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐTimeZoneSearchOptions(ctx context.Context, v interface{}) (TimeZoneSearchOptions, error) { + return ec.unmarshalInputTimeZoneSearchOptions(ctx, v) +} + +func (ec *executionContext) unmarshalOTimeZoneSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐTimeZoneSearchOptions(ctx context.Context, v interface{}) (*TimeZoneSearchOptions, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOTimeZoneSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐTimeZoneSearchOptions(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalOUser2githubᚗcomᚋtargetᚋgoalertᚋuserᚐUser(ctx context.Context, sel ast.SelectionSet, v user.User) graphql.Marshaler { + return ec._User(ctx, sel, &v) +} + +func (ec *executionContext) marshalOUser2ᚖgithub.comᚋtargetᚋgoalertᚋuserᚐUser(ctx context.Context, sel ast.SelectionSet, v *user.User) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._User(ctx, sel, v) +} + +func (ec *executionContext) marshalOUserContactMethod2githubᚗcomᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐContactMethod(ctx context.Context, sel ast.SelectionSet, v contactmethod.ContactMethod) graphql.Marshaler { + return ec._UserContactMethod(ctx, sel, &v) +} + +func (ec *executionContext) marshalOUserContactMethod2ᚖgithub.comᚋtargetᚋgoalertᚋuserᚋcontactmethodᚐContactMethod(ctx context.Context, sel ast.SelectionSet, v *contactmethod.ContactMethod) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._UserContactMethod(ctx, 
sel, v) +} + +func (ec *executionContext) marshalOUserNotificationRule2githubᚗcomᚋtargetᚋgoalertᚋuserᚋnotificationruleᚐNotificationRule(ctx context.Context, sel ast.SelectionSet, v notificationrule.NotificationRule) graphql.Marshaler { + return ec._UserNotificationRule(ctx, sel, &v) +} + +func (ec *executionContext) marshalOUserNotificationRule2ᚖgithub.comᚋtargetᚋgoalertᚋuserᚋnotificationruleᚐNotificationRule(ctx context.Context, sel ast.SelectionSet, v *notificationrule.NotificationRule) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._UserNotificationRule(ctx, sel, v) +} + +func (ec *executionContext) marshalOUserOverride2githubᚗcomᚋtargetᚋgoalertᚋoverrideᚐUserOverride(ctx context.Context, sel ast.SelectionSet, v override.UserOverride) graphql.Marshaler { + return ec._UserOverride(ctx, sel, &v) +} + +func (ec *executionContext) marshalOUserOverride2ᚖgithub.comᚋtargetᚋgoalertᚋoverrideᚐUserOverride(ctx context.Context, sel ast.SelectionSet, v *override.UserOverride) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._UserOverride(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOUserOverrideSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserOverrideSearchOptions(ctx context.Context, v interface{}) (UserOverrideSearchOptions, error) { + return ec.unmarshalInputUserOverrideSearchOptions(ctx, v) +} + +func (ec *executionContext) unmarshalOUserOverrideSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐUserOverrideSearchOptions(ctx context.Context, v interface{}) (*UserOverrideSearchOptions, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOUserOverrideSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserOverrideSearchOptions(ctx, v) + return &res, err +} + +func (ec *executionContext) unmarshalOUserRole2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserRole(ctx context.Context, v interface{}) (UserRole, error) { + var res UserRole + return res, res.UnmarshalGQL(v) +} + +func (ec 
*executionContext) marshalOUserRole2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserRole(ctx context.Context, sel ast.SelectionSet, v UserRole) graphql.Marshaler { + return v +} + +func (ec *executionContext) unmarshalOUserRole2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐUserRole(ctx context.Context, v interface{}) (*UserRole, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOUserRole2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserRole(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalOUserRole2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐUserRole(ctx context.Context, sel ast.SelectionSet, v *UserRole) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return v +} + +func (ec *executionContext) unmarshalOUserSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserSearchOptions(ctx context.Context, v interface{}) (UserSearchOptions, error) { + return ec.unmarshalInputUserSearchOptions(ctx, v) +} + +func (ec *executionContext) unmarshalOUserSearchOptions2ᚖgithub.comᚋtargetᚋgoalertᚋgraphql2ᚐUserSearchOptions(ctx context.Context, v interface{}) (*UserSearchOptions, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalOUserSearchOptions2githubᚗcomᚋtargetᚋgoalertᚋgraphql2ᚐUserSearchOptions(ctx, v) + return &res, err +} + +func (ec *executionContext) marshalO__EnumValue2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx context.Context, sel ast.SelectionSet, v []introspection.EnumValue) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = 
ec.marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalO__Field2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx context.Context, sel ast.SelectionSet, v []introspection.Field) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalO__InputValue2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) 
marshalO__Schema2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx context.Context, sel ast.SelectionSet, v introspection.Schema) graphql.Marshaler { + return ec.___Schema(ctx, sel, &v) +} + +func (ec *executionContext) marshalO__Schema2ᚖgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx context.Context, sel ast.SelectionSet, v *introspection.Schema) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec.___Schema(ctx, sel, v) +} + +func (ec *executionContext) marshalO__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v introspection.Type) graphql.Marshaler { + return ec.___Type(ctx, sel, &v) +} + +func (ec *executionContext) marshalO__Type2ᚕgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + rctx := &graphql.ResolverContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithResolverContext(ctx, rctx) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalO__Type2ᚖgithub.comᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec.___Type(ctx, sel, v) +} + +// endregion ***************************** type.gotpl ***************************** diff --git a/graphql2/gqlgen.yml b/graphql2/gqlgen.yml new file mode 100644 index 
0000000000..930c6cc38f --- /dev/null +++ b/graphql2/gqlgen.yml @@ -0,0 +1,64 @@ +schema: ./schema.graphql +exec: + filename: generated.go +model: + filename: models_gen.go +models: + AuthSubject: + model: github.com/target/goalert/user.AuthSubject + AuthSubjectInput: + model: github.com/target/goalert/user.AuthSubject + User: + model: github.com/target/goalert/user.User + fields: + statusUpdateContactMethodID: + fieldName: AlertStatusCMID + UserContactMethod: + model: github.com/target/goalert/user/contactmethod.ContactMethod + UserNotificationRule: + model: github.com/target/goalert/user/notificationrule.NotificationRule + Target: + model: github.com/target/goalert/assignment.RawTarget + fields: + name: + resolver: true + TargetInput: + model: github.com/target/goalert/assignment.RawTarget + TargetType: + model: github.com/target/goalert/assignment.TargetType + Alert: + model: github.com/target/goalert/alert.Alert + AlertState: + model: github.com/target/goalert/alert.State + Service: + model: github.com/target/goalert/service.Service + ISOTimestamp: + model: github.com/target/goalert/graphql2.ISOTimestamp + EscalationPolicy: + model: github.com/target/goalert/escalation.Policy + Rotation: + model: github.com/target/goalert/schedule/rotation.Rotation + Schedule: + model: github.com/target/goalert/schedule.Schedule + ServiceOnCallUser: + model: github.com/target/goalert/oncall.ServiceOnCallUser + EscalationPolicyStep: + model: github.com/target/goalert/escalation.Step + RotationType: + model: github.com/target/goalert/schedule/rotation.Type + IntegrationKey: + model: github.com/target/goalert/integrationkey.IntegrationKey + Label: + model: github.com/target/goalert/label.Label + ClockTime: + model: github.com/target/goalert/graphql2.ClockTime + ScheduleRule: + model: github.com/target/goalert/schedule/rule.Rule + UserOverride: + model: github.com/target/goalert/override.UserOverride + OnCallShift: 
+ model: github.com/target/goalert/oncall.Shift + ContactMethodType: + model: github.com/target/goalert/graphql2.ContactMethodType + SlackChannel: + model: github.com/target/goalert/notification/slack.Channel diff --git a/graphql2/graphqlapp/alert.go b/graphql2/graphqlapp/alert.go new file mode 100644 index 0000000000..9261414efa --- /dev/null +++ b/graphql2/graphqlapp/alert.go @@ -0,0 +1,189 @@ +package graphqlapp + +import ( + context "context" + "fmt" + "github.com/target/goalert/alert" + "github.com/target/goalert/assignment" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/service" + "github.com/target/goalert/validation/validate" + + "github.com/pkg/errors" +) + +type Alert App + +func (a *App) Alert() graphql2.AlertResolver { return (*Alert)(a) } + +func (q *Query) Alert(ctx context.Context, alertID int) (*alert.Alert, error) { + return (*App)(q).FindOneAlert(ctx, alertID) +} + +func (q *Query) mergeFavorites(ctx context.Context, svcs []string) ([]string, error) { + targets, err := q.FavoriteStore.FindAll(ctx, permission.UserID(ctx), []assignment.TargetType{assignment.TargetTypeService}) + if err != nil { + return nil, err + } + if len(svcs) == 0 { + for _, t := range targets { + svcs = append(svcs, t.TargetID()) + } + } else { + // favorites AND serviceIDs + m := make(map[string]bool, len(svcs)) + for _, o := range svcs { + m[o] = true + } + // empty slice + svcs = svcs[:0] + + for _, t := range targets { + if !m[t.TargetID()] { + continue + } + svcs = append(svcs, t.TargetID()) + } + // Here we have the intersection of favorites and user-specified serviceIDs in opts.FilterByServiceID + } + return svcs, nil +} + +func (q *Query) Alerts(ctx context.Context, opts *graphql2.AlertSearchOptions) (conn *graphql2.AlertConnection, err error) { + if opts == nil { + opts = new(graphql2.AlertSearchOptions) + } + + var s alert.SearchOptions 
+ if opts.First != nil { + s.Limit = *opts.First + } + + if s.Limit == 0 { + s.Limit = 15 + } + if opts.Search != nil { + s.Search = *opts.Search + } + s.Omit = opts.Omit + + err = validate.Many( + validate.Range("ServiceIDs", len(opts.FilterByServiceID), 0, 50), + validate.Range("First", s.Limit, 1, 100), + ) + if err != nil { + return nil, err + } + + hasCursor := opts.After != nil && *opts.After != "" + + if hasCursor { + err = search.ParseCursor(*opts.After, &s) + if err != nil { + return nil, errors.Wrap(err, "parse cursor") + } + } else { + if opts.FavoritesOnly != nil && *opts.FavoritesOnly { + s.Services, err = q.mergeFavorites(ctx, opts.FilterByServiceID) + if err != nil { + return nil, err + } + } else { + s.Services = opts.FilterByServiceID + } + for _, f := range opts.FilterByStatus { + switch f { + case graphql2.AlertStatusStatusAcknowledged: + s.Status = append(s.Status, alert.StatusActive) + case graphql2.AlertStatusStatusUnacknowledged: + s.Status = append(s.Status, alert.StatusTriggered) + case graphql2.AlertStatusStatusClosed: + s.Status = append(s.Status, alert.StatusClosed) + } + } + } + + s.Limit++ + + alerts, err := q.AlertStore.Search(ctx, &s) + if err != nil { + return conn, err + } + + conn = new(graphql2.AlertConnection) + if len(alerts) == s.Limit { + conn.PageInfo.HasNextPage = true + alerts = alerts[:len(alerts)-1] + } + conn.Nodes = alerts + if len(alerts) > 0 { + cur, err := search.Cursor(s) + if err != nil { + return nil, errors.Wrap(err, "serialize cursor") + } + conn.PageInfo.EndCursor = &cur + } + + return conn, nil +} + +func (a *Alert) ID(ctx context.Context, raw *alert.Alert) (string, error) { + return fmt.Sprintf("Alert(%d)", raw.ID), nil +} +func (a *Alert) Status(ctx context.Context, raw *alert.Alert) (graphql2.AlertStatus, error) { + switch raw.Status { + case alert.StatusTriggered: + return graphql2.AlertStatusStatusUnacknowledged, nil + case alert.StatusClosed: + return graphql2.AlertStatusStatusClosed, nil + case 
alert.StatusActive: + return graphql2.AlertStatusStatusAcknowledged, nil + } + return "", errors.New("unknown alert status " + string(raw.Status)) +} +func (a *Alert) AlertID(ctx context.Context, raw *alert.Alert) (int, error) { + return raw.ID, nil +} + +func (a *Alert) State(ctx context.Context, raw *alert.Alert) (*alert.State, error) { + return (*App)(a).FindOneAlertState(ctx, raw.ID) +} + +func (a *Alert) Service(ctx context.Context, raw *alert.Alert) (*service.Service, error) { + return (*App)(a).FindOneService(ctx, raw.ServiceID) +} + +func (m *Mutation) EscalateAlerts(ctx context.Context, ids []int) ([]alert.Alert, error) { + ids, err := m.AlertStore.EscalateMany(ctx, ids) + if err != nil { + return nil, err + } + + return m.AlertStore.FindMany(ctx, ids) +} + +func (m *Mutation) UpdateAlerts(ctx context.Context, args graphql2.UpdateAlertsInput) ([]alert.Alert, error) { + var status alert.Status + + err := validate.OneOf("Status", args.NewStatus, graphql2.AlertStatusStatusAcknowledged, graphql2.AlertStatusStatusClosed) + if err != nil { + return nil, err + } + + switch args.NewStatus { + case graphql2.AlertStatusStatusAcknowledged: + status = alert.StatusActive + case graphql2.AlertStatusStatusClosed: + status = alert.StatusClosed + } + + var updatedIDs []int + updatedIDs, err = m.AlertStore.UpdateManyAlertStatus(ctx, status, args.AlertIDs) + if err != nil { + return nil, err + } + + return m.AlertStore.FindMany(ctx, updatedIDs) +} diff --git a/graphql2/graphqlapp/app.go b/graphql2/graphqlapp/app.go new file mode 100644 index 0000000000..c1f4730bc6 --- /dev/null +++ b/graphql2/graphqlapp/app.go @@ -0,0 +1,217 @@ +package graphqlapp + +import ( + context "context" + "database/sql" + "net/http" + "sync" + "time" + + "github.com/99designs/gqlgen/graphql" + "github.com/99designs/gqlgen/handler" + "github.com/pkg/errors" + "github.com/target/goalert/alert" + "github.com/target/goalert/config" + "github.com/target/goalert/escalation" + 
"github.com/target/goalert/graphql2" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/label" + "github.com/target/goalert/notification" + "github.com/target/goalert/notification/slack" + "github.com/target/goalert/notificationchannel" + "github.com/target/goalert/oncall" + "github.com/target/goalert/override" + "github.com/target/goalert/permission" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/service" + "github.com/target/goalert/timezone" + "github.com/target/goalert/user" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/user/favorite" + "github.com/target/goalert/user/notificationrule" + "github.com/target/goalert/util/errutil" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation" + "github.com/vektah/gqlparser/gqlerror" + "go.opencensus.io/trace" +) + +type App struct { + DB *sql.DB + UserStore user.Store + CMStore contactmethod.Store + NRStore notificationrule.Store + NCStore notificationchannel.Store + AlertStore alert.Store + ServiceStore service.Store + FavoriteStore favorite.Store + PolicyStore escalation.Store + ScheduleStore schedule.Store + RotationStore rotation.Store + OnCallStore oncall.Store + IntKeyStore integrationkey.Store + LabelStore label.Store + RuleStore rule.Store + OverrideStore override.Store + ConfigStore *config.Store + SlackStore *slack.ChannelSender + + NotificationStore notification.Store + + TimeZoneStore *timezone.Store +} + +func mustAuth(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + err := permission.LimitCheckAny(req.Context()) + if errutil.HTTPError(req.Context(), w, err) { + return + } + + h.ServeHTTP(w, req) + }) +} + +func (a *App) PlayHandler() http.Handler { + var data struct { + Version string + } + 
data.Version = playVersion + return mustAuth(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + err := playTmpl.Execute(w, data) + if err != nil { + log.Log(req.Context(), err) + http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) + } + })) +} + +type apolloTracingExt struct { + Version int `json:"version"` + Start time.Time `json:"startTime"` + End time.Time `json:"endTime"` + Duration time.Duration `json:"duration"` + Execution struct { + Resolvers []apolloTracingResolver `json:"resolvers"` + } `json:"execution"` + mx sync.Mutex +} +type apolloTracingResolver struct { + Path []interface{} `json:"path"` + ParentType string `json:"parentType"` + FieldName string `json:"fieldName"` + ReturnType string `json:"returnType"` + StartOffset time.Duration `json:"startOffset"` + Duration time.Duration `json:"duration"` +} + +type fieldErr struct { + FieldName string `json:"fieldName"` + Message string `json:"message"` +} + +func (a *App) Handler() http.Handler { + return mustAuth(handler.GraphQL( + graphql2.NewExecutableSchema(graphql2.Config{Resolvers: a}), + handler.RequestMiddleware(func(ctx context.Context, next func(ctx context.Context) []byte) []byte { + ctx = a.registerLoaders(ctx) + + if permission.Admin(ctx) { + rctx := graphql.GetRequestContext(ctx) + ext := &apolloTracingExt{ + Version: 1, + Start: time.Now(), + } + if rctx.Extensions == nil { + rctx.Extensions = make(map[string]interface{}, 1) + } + rctx.Extensions["tracing"] = ext + defer func() { + ext.End = time.Now() + ext.Duration = ext.End.Sub(ext.Start) + }() + } + + return next(ctx) + }), + + // middleware -> single field err to multi + handler.ResolverMiddleware(func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) { + rctx := graphql.GetResolverContext(ctx) + + if ext, ok := graphql.GetRequestContext(ctx).Extensions["tracing"].(*apolloTracingExt); ok { + var res apolloTracingResolver + res.FieldName = rctx.Field.Name + 
res.ParentType = rctx.Object + res.Path = rctx.Path() + res.ReturnType = rctx.Field.Definition.Type.String() + ext.mx.Lock() + res.StartOffset = time.Since(ext.Start) + ext.mx.Unlock() + defer func() { + ext.mx.Lock() + res.Duration = time.Since(ext.Start) - res.StartOffset + ext.Execution.Resolvers = append(ext.Execution.Resolvers, res) + ext.mx.Unlock() + }() + } + ctx, sp := trace.StartSpan(ctx, "GQL."+rctx.Object+"."+rctx.Field.Name, trace.WithSpanKind(trace.SpanKindServer)) + defer sp.End() + sp.AddAttributes( + trace.StringAttribute("graphql.object", rctx.Object), + trace.StringAttribute("graphql.field.name", rctx.Field.Name), + ) + res, err = next(ctx) + if err != nil { + sp.Annotate([]trace.Attribute{ + trace.BoolAttribute("error", true), + }, err.Error()) + } else if rctx.Object == "Mutation" { + ctx = log.WithFields(ctx, log.Fields{ + "MutationName": rctx.Field.Name, + }) + log.Logf(ctx, "Mutation.") + } + + return res, err + }), + handler.ErrorPresenter(func(ctx context.Context, err error) *gqlerror.Error { + err = errutil.MapDBError(err) + isUnsafe, safeErr := errutil.ScrubError(err) + if isUnsafe { + log.Log(ctx, err) + } + gqlErr := graphql.DefaultErrorPresenter(ctx, safeErr) + + if m, ok := errors.Cause(safeErr).(validation.MultiFieldError); ok { + errs := make([]fieldErr, len(m.FieldErrors())) + for i, err := range m.FieldErrors() { + errs[i].FieldName = err.Field() + errs[i].Message = err.Reason() + } + gqlErr.Message = "Multiple fields failed validation." 
+ gqlErr.Extensions = map[string]interface{}{ + "isMultiFieldError": true, + "fieldErrors": errs, + } + } else if e, ok := errors.Cause(safeErr).(validation.FieldError); ok { + type reasonable interface { + Reason() string + } + msg := e.Error() + if rs, ok := e.(reasonable); ok { + msg = rs.Reason() + } + gqlErr.Message = msg + gqlErr.Extensions = map[string]interface{}{ + "fieldName": e.Field(), + "isFieldError": true, + } + } + + return gqlErr + }), + )) +} diff --git a/graphql2/graphqlapp/config.go b/graphql2/graphqlapp/config.go new file mode 100644 index 0000000000..4070d2c9b0 --- /dev/null +++ b/graphql2/graphqlapp/config.go @@ -0,0 +1,36 @@ +package graphqlapp + +import ( + "context" + + "github.com/target/goalert/config" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/permission" +) + +func (q *Query) Config(ctx context.Context, all *bool) ([]graphql2.ConfigValue, error) { + perm := []permission.Checker{permission.System, permission.Admin} + var publicOnly bool + if all == nil || !*all { + publicOnly = true + perm = append(perm, permission.User) + } + + err := permission.LimitCheckAny(ctx, perm...) 
+ if err != nil { + return nil, err + } + + if publicOnly { + return graphql2.MapPublicConfigValues(q.ConfigStore.Config()), nil + } + + return graphql2.MapConfigValues(q.ConfigStore.Config()), nil +} + +func (m *Mutation) SetConfig(ctx context.Context, input []graphql2.ConfigValueInput) (bool, error) { + err := m.ConfigStore.UpdateConfig(ctx, func(cfg config.Config) (config.Config, error) { + return graphql2.ApplyConfigValues(cfg, input) + }) + return err == nil, err +} diff --git a/graphql2/graphqlapp/contactmethod.go b/graphql2/graphqlapp/contactmethod.go new file mode 100644 index 0000000000..2d5ca28412 --- /dev/null +++ b/graphql2/graphqlapp/contactmethod.go @@ -0,0 +1,63 @@ +package graphqlapp + +import ( + context "context" + "database/sql" + + "github.com/target/goalert/graphql2" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/validation" +) + +func (q *Query) UserContactMethod(ctx context.Context, id string) (*contactmethod.ContactMethod, error) { + return (*App)(q).FindOneCM(ctx, id) +} + +func (m *Mutation) CreateUserContactMethod(ctx context.Context, input graphql2.CreateUserContactMethodInput) (*contactmethod.ContactMethod, error) { + var cm *contactmethod.ContactMethod + err := withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + var err error + cm, err = m.CMStore.CreateTx(ctx, tx, &contactmethod.ContactMethod{ + Name: input.Name, + Type: input.Type, + UserID: input.UserID, + Value: input.Value, + }) + if err != nil { + return err + } + + if input.NewUserNotificationRule != nil { + input.NewUserNotificationRule.UserID = &input.UserID + input.NewUserNotificationRule.ContactMethodID = &cm.ID + + _, err = m.CreateUserNotificationRule(ctx, *input.NewUserNotificationRule) + + if err != nil { + return validation.AddPrefix("newUserNotificationRule.", err) + } + } + return err + }) + if err != nil { + return nil, err + } + + return cm, nil +} +func (m *Mutation) UpdateUserContactMethod(ctx 
context.Context, input graphql2.UpdateUserContactMethodInput) (bool, error) { + err := withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + cm, err := m.CMStore.FindOneTx(ctx, tx, input.ID) + if err != nil { + return err + } + if input.Name != nil { + cm.Name = *input.Name + } + if input.Value != nil { + cm.Value = *input.Value + } + return m.CMStore.UpdateTx(ctx, tx, cm) + }) + return err == nil, err +} diff --git a/graphql2/graphqlapp/dataloaders.go b/graphql2/graphqlapp/dataloaders.go new file mode 100644 index 0000000000..459c8c5706 --- /dev/null +++ b/graphql2/graphqlapp/dataloaders.go @@ -0,0 +1,116 @@ +package graphqlapp + +import ( + context "context" + "github.com/target/goalert/alert" + "github.com/target/goalert/dataloader" + "github.com/target/goalert/escalation" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/service" + "github.com/target/goalert/user" + "github.com/target/goalert/user/contactmethod" + + "github.com/pkg/errors" +) + +type dataLoaderKey int + +const ( + dataLoaderKeyAlert = dataLoaderKey(iota) + dataLoaderKeyEP + dataLoaderKeyRotation + dataLoaderKeySchedule + dataLoaderKeyService + dataLoaderKeyUser + dataLoaderKeyCM +) + +func (a *App) registerLoaders(ctx context.Context) context.Context { + ctx = context.WithValue(ctx, dataLoaderKeyAlert, dataloader.NewAlertLoader(ctx, a.AlertStore)) + ctx = context.WithValue(ctx, dataLoaderKeyEP, dataloader.NewPolicyLoader(ctx, a.PolicyStore)) + ctx = context.WithValue(ctx, dataLoaderKeyRotation, dataloader.NewRotationLoader(ctx, a.RotationStore)) + ctx = context.WithValue(ctx, dataLoaderKeySchedule, dataloader.NewScheduleLoader(ctx, a.ScheduleStore)) + ctx = context.WithValue(ctx, dataLoaderKeyService, dataloader.NewServiceLoader(ctx, a.ServiceStore)) + ctx = context.WithValue(ctx, dataLoaderKeyUser, dataloader.NewUserLoader(ctx, a.UserStore)) + ctx = context.WithValue(ctx, 
dataLoaderKeyCM, dataloader.NewCMLoader(ctx, a.CMStore)) + return ctx +} + +func (app *App) FindOneRotation(ctx context.Context, id string) (*rotation.Rotation, error) { + loader, ok := ctx.Value(dataLoaderKeyRotation).(*dataloader.RotationLoader) + if !ok { + return app.RotationStore.FindRotation(ctx, id) + } + + return loader.FetchOne(ctx, id) +} + +func (app *App) FindOneSchedule(ctx context.Context, id string) (*schedule.Schedule, error) { + loader, ok := ctx.Value(dataLoaderKeySchedule).(*dataloader.ScheduleLoader) + if !ok { + return app.ScheduleStore.FindOne(ctx, id) + } + + return loader.FetchOne(ctx, id) +} + +func (app *App) FindOneUser(ctx context.Context, id string) (*user.User, error) { + loader, ok := ctx.Value(dataLoaderKeyUser).(*dataloader.UserLoader) + if !ok { + return app.UserStore.FindOne(ctx, id) + } + + return loader.FetchOne(ctx, id) +} + +// FindOneCM will return a single contact method for the given id, using the contexts dataloader if enabled. +func (app *App) FindOneCM(ctx context.Context, id string) (*contactmethod.ContactMethod, error) { + loader, ok := ctx.Value(dataLoaderKeyUser).(*dataloader.CMLoader) + if !ok { + return app.CMStore.FindOne(ctx, id) + } + + return loader.FetchOne(ctx, id) +} + +func (app *App) FindOnePolicy(ctx context.Context, id string) (*escalation.Policy, error) { + loader, ok := ctx.Value(dataLoaderKeyEP).(*dataloader.PolicyLoader) + if !ok { + return app.PolicyStore.FindOnePolicy(ctx, id) + } + + return loader.FetchOne(ctx, id) +} + +func (app *App) FindOneService(ctx context.Context, id string) (*service.Service, error) { + loader, ok := ctx.Value(dataLoaderKeyService).(*dataloader.ServiceLoader) + if !ok { + return app.ServiceStore.FindOne(ctx, id) + } + + return loader.FetchOne(ctx, id) +} +func (app *App) FindOneAlertState(ctx context.Context, alertID int) (*alert.State, error) { + loader, ok := ctx.Value(dataLoaderKeyAlert).(*dataloader.AlertLoader) + if !ok { + epState, err := app.AlertStore.State(ctx, 
[]int{alertID}) + if err != nil { + return nil, err + } + if len(epState) == 0 { + return nil, errors.New("no current epState for alert") + } + return &epState[0], nil + } + + return loader.FetchOneAlertState(ctx, alertID) +} +func (app *App) FindOneAlert(ctx context.Context, id int) (*alert.Alert, error) { + loader, ok := ctx.Value(dataLoaderKeyAlert).(*dataloader.AlertLoader) + if !ok { + return app.AlertStore.FindOne(ctx, id) + } + + return loader.FetchOneAlert(ctx, id) +} diff --git a/graphql2/graphqlapp/escalationpolicy.go b/graphql2/graphqlapp/escalationpolicy.go new file mode 100644 index 0000000000..57dfb080a8 --- /dev/null +++ b/graphql2/graphqlapp/escalationpolicy.go @@ -0,0 +1,393 @@ +package graphqlapp + +import ( + "context" + "database/sql" + "fmt" + "strconv" + + "github.com/target/goalert/assignment" + "github.com/target/goalert/escalation" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" +) + +type EscalationPolicy App +type EscalationPolicyStep App + +func (a *App) EscalationPolicy() graphql2.EscalationPolicyResolver { return (*EscalationPolicy)(a) } +func (a *App) EscalationPolicyStep() graphql2.EscalationPolicyStepResolver { + return (*EscalationPolicyStep)(a) +} + +func contains(ids []string, id string) bool { + for _, x := range ids { + if x == id { + return true + } + } + return false +} + +func (m *Mutation) CreateEscalationPolicyStep(ctx context.Context, input graphql2.CreateEscalationPolicyStepInput) (step *escalation.Step, err error) { + if len(input.Targets) != 0 && input.NewRotation != nil { + return nil, validate.Many( + validation.NewFieldError("targets", "cannot be used with `newRotation`"), + validation.NewFieldError("newRotation", "cannot be used with `targets`"), + ) + } + + if len(input.Targets) != 0 && input.NewSchedule != nil { + return nil, 
validate.Many( + validation.NewFieldError("targets", "cannot be used with `newSchedule`"), + validation.NewFieldError("newSchedule", "cannot be used with `targets`"), + ) + } + + if input.NewSchedule != nil && input.NewRotation != nil { + return nil, validate.Many( + validation.NewFieldError("newSchedule", "cannot be used with `newRotation`"), + validation.NewFieldError("newRotation", "cannot be used with `newSchedule`"), + ) + } + + err = withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + s := &escalation.Step{ + DelayMinutes: input.DelayMinutes, + } + if input.EscalationPolicyID != nil { + s.PolicyID = *input.EscalationPolicyID + } + + step, err = m.PolicyStore.CreateStepTx(ctx, tx, s) + if err != nil { + return err + } + + if input.NewRotation != nil { + rot, err := m.CreateRotation(ctx, *input.NewRotation) + if err != nil { + return validation.AddPrefix("newRotation.", err) + } + tgt := assignment.RotationTarget(rot.ID) + step.Targets = append(step.Targets, tgt) + + // Should add to escalation_policy_actions + err = m.PolicyStore.AddStepTargetTx(ctx, tx, step.ID, tgt) + if err != nil { + return validation.AddPrefix("newRotation.", err) + } + } + + if input.NewSchedule != nil { + s, err := m.CreateSchedule(ctx, *input.NewSchedule) + if err != nil { + return validation.AddPrefix("newSchedule.", err) + } + tgt := assignment.ScheduleTarget(s.ID) + step.Targets = append(step.Targets, tgt) + + // Should add to escalation_policy_actions + err = m.PolicyStore.AddStepTargetTx(ctx, tx, step.ID, tgt) + if err != nil { + return validation.AddPrefix("newSchedule.", err) + } + } + + userID := permission.UserID(ctx) + for i, tgt := range input.Targets { + if tgt.Type == assignment.TargetTypeUser && tgt.ID == "__current_user" { + tgt.ID = userID + } + err = m.PolicyStore.AddStepTargetTx(ctx, tx, step.ID, tgt) + if err != nil { + return validation.AddPrefix("targets["+strconv.Itoa(i)+"].", err) + } + } + + return err + }) + + return step, err +} + +func (m 
*Mutation) CreateEscalationPolicy(ctx context.Context, input graphql2.CreateEscalationPolicyInput) (pol *escalation.Policy, err error) { + err = withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + p := &escalation.Policy{ + Name: input.Name, + } + if input.Repeat != nil { + p.Repeat = *input.Repeat + } + if input.Description != nil { + p.Description = *input.Description + } + + pol, err = m.PolicyStore.CreatePolicyTx(ctx, tx, p) + if err != nil { + return err + } + + for i, step := range input.Steps { + step.EscalationPolicyID = &pol.ID + _, err = m.CreateEscalationPolicyStep(ctx, step) + if err != nil { + return validation.AddPrefix("Steps["+strconv.Itoa(i)+"].", err) + } + } + return err + }) + + return pol, err +} + +func (m *Mutation) UpdateEscalationPolicy(ctx context.Context, input graphql2.UpdateEscalationPolicyInput) (bool, error) { + err := withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + ep, err := m.PolicyStore.FindOnePolicyForUpdateTx(ctx, tx, input.ID) + if err != nil { + return err + } + + if input.Name != nil { + ep.Name = *input.Name + } + + if input.Description != nil { + ep.Description = *input.Description + } + + if input.Repeat != nil { + ep.Repeat = *input.Repeat + } + + err = m.PolicyStore.UpdatePolicyTx(ctx, tx, ep) + if err != nil { + return err + } + + if input.StepIDs != nil { + // get current steps on policy + steps, err := m.PolicyStore.FindAllStepsTx(ctx, tx, input.ID) + if err != nil { + return err + } + + // get list of step ids + var stepIDs []string + for _, step := range steps { + stepIDs = append(stepIDs, step.ID) + } + + // delete existing id if not found in input steps slice + for _, stepID := range stepIDs { + if !contains(input.StepIDs, stepID) { + _, err = m.PolicyStore.DeleteStepTx(ctx, tx, stepID) + if err != nil { + return err + } + } + } + + // loop through input steps to update order + for i, stepID := range input.StepIDs { + if !contains(stepIDs, stepID) { + return 
validation.NewFieldError("steps["+strconv.Itoa(i)+"]", "uuid does not exist on policy") + } + + err = m.PolicyStore.UpdateStepNumberTx(ctx, tx, stepID, i) + if err != nil { + return err + } + } + } + + return err + }) + + return true, err +} + +func (m *Mutation) UpdateEscalationPolicyStep(ctx context.Context, input graphql2.UpdateEscalationPolicyStepInput) (bool, error) { + err := withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + step, err := m.PolicyStore.FindOneStepForUpdateTx(ctx, tx, input.ID) // get delay + if err != nil { + return err + } + + // update delay if provided + if input.DelayMinutes != nil { + step.DelayMinutes = *input.DelayMinutes + + err = m.PolicyStore.UpdateStepDelayTx(ctx, tx, step.ID, step.DelayMinutes) + if err != nil { + return err + } + } + + // update targets if provided + if input.Targets != nil { + step.Targets = make([]assignment.Target, len(input.Targets)) + for i, tgt := range input.Targets { + step.Targets[i] = tgt + } + + // get current targets on step + curr, err := m.PolicyStore.FindAllStepTargetsTx(ctx, tx, step.ID) + if err != nil { + return err + } + + wantedTargets := make(map[assignment.RawTarget]int, len(step.Targets)) + currentTargets := make(map[assignment.RawTarget]bool, len(curr)) + + // construct maps + for i, tgt := range step.Targets { + rt := assignment.NewRawTarget(tgt) + if oldIdx, ok := wantedTargets[rt]; ok { + return validation.NewFieldError(fmt.Sprintf("Targets[%d]", i), fmt.Sprintf("Duplicates existing target at index %d.", oldIdx)) + } + wantedTargets[rt] = i + } + for _, tgt := range curr { + currentTargets[assignment.NewRawTarget(tgt)] = true + } + + // add targets in wanted that are not in curr + for tgt, idx := range wantedTargets { + if currentTargets[tgt] { + continue + } + + // add new step + err = m.PolicyStore.AddStepTargetTx(ctx, tx, step.ID, tgt) + if err != nil { + return validation.AddPrefix(fmt.Sprintf("Targets[%d].", idx), err) + } + } + + // remove targets in curr 
that are not in wanted + for tgt := range currentTargets { + if _, ok := wantedTargets[tgt]; ok { + continue + } + + // delete unwanted step + err = m.PolicyStore.DeleteStepTargetTx(ctx, tx, step.ID, tgt) + if err != nil { + return err + } + } + } + + return err + }) + + return true, err +} + +func (step *EscalationPolicyStep) Targets(ctx context.Context, raw *escalation.Step) ([]assignment.RawTarget, error) { + // TODO: use dataloader + var targets []assignment.Target + var err error + if len(raw.Targets) > 0 { + targets = raw.Targets + } else { + targets, err = step.PolicyStore.FindAllStepTargets(ctx, raw.ID) + if err != nil { + return nil, err + } + } + + result := make([]assignment.RawTarget, len(targets)) + for i, tgt := range targets { + switch t := tgt.(type) { + case *assignment.RawTarget: + result[i] = *t + case assignment.RawTarget: + result[i] = t + default: + result[i] = assignment.NewRawTarget(t) + } + } + + return result, nil +} +func (step *EscalationPolicyStep) EscalationPolicy(ctx context.Context, raw *escalation.Step) (*escalation.Policy, error) { + return (*App)(step).FindOnePolicy(ctx, raw.PolicyID) +} + +func (ep *EscalationPolicy) Steps(ctx context.Context, raw *escalation.Policy) ([]escalation.Step, error) { + return ep.PolicyStore.FindAllSteps(ctx, raw.ID) +} + +func (ep *EscalationPolicy) AssignedTo(ctx context.Context, raw *escalation.Policy) ([]assignment.RawTarget, error) { + svcs, err := ep.ServiceStore.FindAllByEP(ctx, raw.ID) + if err != nil { + return nil, err + } + + var tgts []assignment.RawTarget + for _, svc := range svcs { + var tgt assignment.RawTarget + tgt.ID = svc.ID + tgt.Name = svc.Name + tgt.Type = assignment.TargetTypeService + tgts = append(tgts, tgt) + } + + return tgts, nil +} + +func (q *Query) EscalationPolicy(ctx context.Context, id string) (*escalation.Policy, error) { + return (*App)(q).FindOnePolicy(ctx, id) +} + +func (q *Query) EscalationPolicies(ctx context.Context, opts 
*graphql2.EscalationPolicySearchOptions) (conn *graphql2.EscalationPolicyConnection, err error) { + if opts == nil { + opts = &graphql2.EscalationPolicySearchOptions{} + } + + var searchOpts escalation.SearchOptions + if opts.Search != nil { + searchOpts.Search = *opts.Search + } + searchOpts.Omit = opts.Omit + if opts.After != nil && *opts.After != "" { + err = search.ParseCursor(*opts.After, &searchOpts) + if err != nil { + return nil, err + } + } + if opts.First != nil { + searchOpts.Limit = *opts.First + } + if searchOpts.Limit == 0 { + searchOpts.Limit = 15 + } + + searchOpts.Limit++ + pols, err := q.PolicyStore.Search(ctx, &searchOpts) + if err != nil { + return nil, err + } + conn = new(graphql2.EscalationPolicyConnection) + if len(pols) == searchOpts.Limit { + pols = pols[:len(pols)-1] + conn.PageInfo.HasNextPage = true + } + if len(pols) > 0 { + last := pols[len(pols)-1] + searchOpts.After.Name = last.Name + + cur, err := search.Cursor(searchOpts) + if err != nil { + return nil, err + } + conn.PageInfo.EndCursor = &cur + } + conn.Nodes = pols + return conn, err +} diff --git a/graphql2/graphqlapp/integrationkey.go b/graphql2/graphqlapp/integrationkey.go new file mode 100644 index 0000000000..d2068e8f41 --- /dev/null +++ b/graphql2/graphqlapp/integrationkey.go @@ -0,0 +1,56 @@ +package graphqlapp + +import ( + context "context" + "database/sql" + "net/url" + + "github.com/target/goalert/config" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/integrationkey" +) + +type IntegrationKey App + +func (a *App) IntegrationKey() graphql2.IntegrationKeyResolver { return (*IntegrationKey)(a) } + +func (q *Query) IntegrationKey(ctx context.Context, id string) (*integrationkey.IntegrationKey, error) { + return q.IntKeyStore.FindOne(ctx, id) +} +func (m *Mutation) CreateIntegrationKey(ctx context.Context, input graphql2.CreateIntegrationKeyInput) (key *integrationkey.IntegrationKey, err error) { + var serviceID string + if input.ServiceID != 
nil { + serviceID = *input.ServiceID + } + err = withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + key = &integrationkey.IntegrationKey{ + ServiceID: serviceID, + Name: input.Name, + Type: integrationkey.Type(input.Type), + } + key, err = m.IntKeyStore.CreateKeyTx(ctx, tx, key) + return err + }) + return key, err +} +func (key *IntegrationKey) Type(ctx context.Context, raw *integrationkey.IntegrationKey) (graphql2.IntegrationKeyType, error) { + return graphql2.IntegrationKeyType(raw.Type), nil +} +func (key *IntegrationKey) Href(ctx context.Context, raw *integrationkey.IntegrationKey) (string, error) { + cfg := config.FromContext(ctx) + q := make(url.Values) + q.Set("token", raw.ID) + switch raw.Type { + case integrationkey.TypeGeneric: + return cfg.CallbackURL("/api/v2/generic/incoming", q), nil + case integrationkey.TypeGrafana: + return cfg.CallbackURL("/api/v2/grafana/incoming", q), nil + case integrationkey.TypeEmail: + if !cfg.Mailgun.Enable || cfg.Mailgun.EmailDomain == "" { + return "", nil + } + return "mailto:" + raw.ID + "@" + cfg.Mailgun.EmailDomain, nil + } + + return "", nil +} diff --git a/graphql2/graphqlapp/label.go b/graphql2/graphqlapp/label.go new file mode 100644 index 0000000000..dc4ff630a6 --- /dev/null +++ b/graphql2/graphqlapp/label.go @@ -0,0 +1,95 @@ +package graphqlapp + +import ( + context "context" + "database/sql" + "github.com/target/goalert/config" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/label" + "github.com/target/goalert/search" + "github.com/target/goalert/validation" +) + +func (q *Query) Labels(ctx context.Context, input *graphql2.LabelSearchOptions) (conn *graphql2.LabelConnection, err error) { + if input == nil { + input = &graphql2.LabelSearchOptions{} + } + + var searchOpts label.SearchOptions + if input.Search != nil { + searchOpts.Search = *input.Search + } + searchOpts.Omit = input.Omit + if input.UniqueKeys != nil { + searchOpts.UniqueKeys = 
*input.UniqueKeys + } + if input.After != nil && *input.After != "" { + err = search.ParseCursor(*input.After, &searchOpts) + if err != nil { + return conn, err + } + } + if input.First != nil { + searchOpts.Limit = *input.First + } + if searchOpts.Limit == 0 { + searchOpts.Limit = 15 + } + + searchOpts.Limit++ + labels, err := q.LabelStore.Search(ctx, &searchOpts) + if err != nil { + return nil, err + } + conn = new(graphql2.LabelConnection) + if len(labels) == searchOpts.Limit { + labels = labels[:len(labels)-1] + conn.PageInfo.HasNextPage = true + } + if len(labels) > 0 { + last := labels[len(labels)-1] + searchOpts.After.Key = last.Key + searchOpts.After.TargetType = last.Target.TargetType() + searchOpts.After.TargetID = last.Target.TargetID() + + cur, err := search.Cursor(searchOpts) + if err != nil { + return nil, err + } + conn.PageInfo.EndCursor = &cur + } + conn.Nodes = labels + return conn, err +} +func (m *Mutation) SetLabel(ctx context.Context, input graphql2.SetLabelInput) (bool, error) { + err := withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + cfg := config.FromContext(ctx) + if cfg.General.DisableLabelCreation { + allLabels, err := m.LabelStore.UniqueKeysTx(ctx, tx) + if err != nil { + return err + } + var keyExists bool + for _, l := range allLabels { + if input.Key == l { + keyExists = true + break + } + } + if !keyExists { + return validation.NewFieldError("Key", "Creating new labels is currently disabled.") + } + } + + return m.LabelStore.SetTx(ctx, tx, &label.Label{ + Key: input.Key, + Value: input.Value, + Target: input.Target, + }) + }) + if err != nil { + return false, err + } + + return true, nil +} diff --git a/graphql2/graphqlapp/mutation.go b/graphql2/graphqlapp/mutation.go new file mode 100644 index 0000000000..3465dc4df2 --- /dev/null +++ b/graphql2/graphqlapp/mutation.go @@ -0,0 +1,119 @@ +package graphqlapp + +import ( + context "context" + + "github.com/target/goalert/assignment" + 
"github.com/target/goalert/graphql2" + "github.com/target/goalert/permission" + "github.com/target/goalert/user" + "github.com/target/goalert/validation" + + "github.com/pkg/errors" +) + +type Mutation App + +func (a *App) Mutation() graphql2.MutationResolver { return (*Mutation)(a) } + +func (a *Mutation) SetFavorite(ctx context.Context, input graphql2.SetFavoriteInput) (bool, error) { + var err error + if input.Favorite { + err = a.FavoriteStore.Set(ctx, permission.UserID(ctx), input.Target) + } else { + err = a.FavoriteStore.Unset(ctx, permission.UserID(ctx), input.Target) + } + + if err != nil { + return false, err + } + return true, nil +} +func (a *Mutation) TestContactMethod(ctx context.Context, id string) (bool, error) { + err := a.NotificationStore.SendContactMethodTest(ctx, id) + if err != nil { + return false, err + } + + return true, nil +} + +func (a *Mutation) AddAuthSubject(ctx context.Context, input user.AuthSubject) (bool, error) { + err := a.UserStore.AddAuthSubjectTx(ctx, nil, &input) + if err != nil { + return false, err + } + return true, nil +} + +func (a *Mutation) DeleteAuthSubject(ctx context.Context, input user.AuthSubject) (bool, error) { + err := a.UserStore.DeleteAuthSubjectTx(ctx, nil, &input) + if err != nil { + return false, err + } + return true, nil +} + +func (a *Mutation) DeleteAll(ctx context.Context, input []assignment.RawTarget) (bool, error) { + tx, err := a.DB.BeginTx(ctx, nil) + if err != nil { + return false, err + } + defer tx.Rollback() + + m := make(map[assignment.TargetType][]string) + for _, tgt := range input { + m[tgt.TargetType()] = append(m[tgt.TargetType()], tgt.TargetID()) + } + + order := []assignment.TargetType{ + assignment.TargetTypeRotation, + assignment.TargetTypeUserOverride, + assignment.TargetTypeSchedule, + assignment.TargetTypeUser, + assignment.TargetTypeIntegrationKey, + assignment.TargetTypeService, + assignment.TargetTypeEscalationPolicy, + assignment.TargetTypeNotificationRule, + 
assignment.TargetTypeContactMethod, + } + + for _, typ := range order { + ids := m[typ] + if len(ids) == 0 { + continue + } + switch typ { + case assignment.TargetTypeUserOverride: + err = errors.Wrap(a.OverrideStore.DeleteUserOverrideTx(ctx, tx, ids...), "delete user overrides") + case assignment.TargetTypeUser: + err = errors.Wrap(a.UserStore.DeleteManyTx(ctx, tx, ids), "delete users") + case assignment.TargetTypeService: + err = errors.Wrap(a.ServiceStore.DeleteManyTx(ctx, tx, ids), "delete services") + case assignment.TargetTypeEscalationPolicy: + err = errors.Wrap(a.PolicyStore.DeleteManyPoliciesTx(ctx, tx, ids), "delete escalation policies") + case assignment.TargetTypeIntegrationKey: + err = errors.Wrap(a.IntKeyStore.DeleteManyTx(ctx, tx, ids), "delete integration keys") + case assignment.TargetTypeSchedule: + err = errors.Wrap(a.ScheduleStore.DeleteManyTx(ctx, tx, ids), "delete schedules") + case assignment.TargetTypeRotation: + err = errors.Wrap(a.RotationStore.DeleteManyTx(ctx, tx, ids), "delete rotations") + case assignment.TargetTypeContactMethod: + err = errors.Wrap(a.CMStore.DeleteTx(ctx, tx, ids...), "delete contact methods") + case assignment.TargetTypeNotificationRule: + err = errors.Wrap(a.NRStore.DeleteTx(ctx, tx, ids...), "delete notification rules") + default: + return false, validation.NewFieldError("type", "unsupported type "+typ.String()) + } + if err != nil { + return false, err + } + } + + err = tx.Commit() + if err != nil { + return false, err + } + + return true, nil +} diff --git a/graphql2/graphqlapp/notificationrule.go b/graphql2/graphqlapp/notificationrule.go new file mode 100644 index 0000000000..09527b7950 --- /dev/null +++ b/graphql2/graphqlapp/notificationrule.go @@ -0,0 +1,45 @@ +package graphqlapp + +import ( + context "context" + "database/sql" + + "github.com/target/goalert/graphql2" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/user/notificationrule" +) + +type UserNotificationRule 
App + +func (a *App) UserNotificationRule() graphql2.UserNotificationRuleResolver { + return (*UserNotificationRule)(a) +} +func (m *Mutation) CreateUserNotificationRule(ctx context.Context, input graphql2.CreateUserNotificationRuleInput) (*notificationrule.NotificationRule, error) { + nr := ¬ificationrule.NotificationRule{ + DelayMinutes: input.DelayMinutes, + } + + if input.UserID != nil { + nr.UserID = *input.UserID + } + + if input.ContactMethodID != nil { + nr.ContactMethodID = *input.ContactMethodID + } + + err := withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + var err error + nr, err = m.NRStore.CreateTx(ctx, tx, nr) + return err + }) + + if err != nil { + return nil, err + } + + return nr, nil +} + +func (nr *UserNotificationRule) ContactMethod(ctx context.Context, raw *notificationrule.NotificationRule) (*contactmethod.ContactMethod, error) { + return (*App)(nr).FindOneCM(ctx, raw.ContactMethodID) +} diff --git a/graphql2/graphqlapp/oncall.go b/graphql2/graphqlapp/oncall.go new file mode 100644 index 0000000000..bdd61516d6 --- /dev/null +++ b/graphql2/graphqlapp/oncall.go @@ -0,0 +1,16 @@ +package graphqlapp + +import ( + context "context" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/oncall" + "github.com/target/goalert/user" +) + +type OnCallShift App + +func (a *App) OnCallShift() graphql2.OnCallShiftResolver { return (*OnCallShift)(a) } + +func (oc *OnCallShift) User(ctx context.Context, raw *oncall.Shift) (*user.User, error) { + return (*App)(oc).FindOneUser(ctx, raw.UserID) +} diff --git a/graphql2/graphqlapp/playground.go b/graphql2/graphqlapp/playground.go new file mode 100644 index 0000000000..0ee4ec3090 --- /dev/null +++ b/graphql2/graphqlapp/playground.go @@ -0,0 +1,43 @@ +package graphqlapp + +import "html/template" + +const playVersion = "1.7.11" + +const playHTML = ` + + + + + + + + + GoAlert - GraphQL API + + + + +
+ + + +` + +var playTmpl = template.Must(template.New("graphqlPlayground").Parse(playHTML)) diff --git a/graphql2/graphqlapp/query.go b/graphql2/graphqlapp/query.go new file mode 100644 index 0000000000..f16353383c --- /dev/null +++ b/graphql2/graphqlapp/query.go @@ -0,0 +1,65 @@ +package graphqlapp + +import ( + context "context" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/search" + "github.com/target/goalert/validation/validate" + + "github.com/pkg/errors" +) + +type Query App + +func (a *App) Query() graphql2.QueryResolver { return (*Query)(a) } + +func (a *Query) AuthSubjectsForProvider(ctx context.Context, _first *int, _after *string, providerID string) (conn *graphql2.AuthSubjectConnection, err error) { + var first int + var after string + if _after != nil { + after = *_after + } + if _first != nil { + first = *_first + } else { + first = 15 + } + err = validate.Range("First", first, 1, 300) + if err != nil { + return nil, err + } + + var c struct { + ProviderID string + LastID string + } + + if after != "" { + err = search.ParseCursor(after, &c) + if err != nil { + return nil, errors.Wrap(err, "parse cursor") + } + } else { + c.ProviderID = providerID + } + + conn = new(graphql2.AuthSubjectConnection) + conn.Nodes, err = a.UserStore.FindSomeAuthSubjectsForProvider(ctx, first+1, c.LastID, c.ProviderID) + if err != nil { + return nil, err + } + if len(conn.Nodes) > first { + conn.Nodes = conn.Nodes[:first] + conn.PageInfo.HasNextPage = true + } + if len(conn.Nodes) > 0 { + c.LastID = conn.Nodes[len(conn.Nodes)-1].SubjectID + } + + cur, err := search.Cursor(c) + if err != nil { + return nil, err + } + conn.PageInfo.EndCursor = &cur + return conn, nil +} diff --git a/graphql2/graphqlapp/rotation.go b/graphql2/graphqlapp/rotation.go new file mode 100644 index 0000000000..10831a2139 --- /dev/null +++ b/graphql2/graphqlapp/rotation.go @@ -0,0 +1,353 @@ +package graphqlapp + +import ( + context "context" + "database/sql" + 
"github.com/target/goalert/graphql2" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/search" + "github.com/target/goalert/user" + "github.com/target/goalert/util" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "time" + + "github.com/pkg/errors" +) + +type Rotation App + +func (a *App) Rotation() graphql2.RotationResolver { return (*Rotation)(a) } + +func (q *Query) Rotation(ctx context.Context, id string) (*rotation.Rotation, error) { + return (*App)(q).FindOneRotation(ctx, id) +} + +func (m *Mutation) CreateRotation(ctx context.Context, input graphql2.CreateRotationInput) (result *rotation.Rotation, err error) { + loc, err := util.LoadLocation(input.TimeZone) + if err != nil { + return nil, validation.NewFieldError("TimeZone", err.Error()) + } + err = withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + rot := &rotation.Rotation{ + Name: input.Name, + Type: input.Type, + Start: input.Start.In(loc), + } + if input.Description != nil { + rot.Description = *input.Description + } + if input.ShiftLength != nil { + rot.ShiftLength = *input.ShiftLength + } + + result, err = m.RotationStore.CreateRotationTx(ctx, tx, rot) + if err != nil { + return err + } + + if input.UserIDs != nil { + err := m.RotationStore.AddRotationUsersTx(ctx, tx, result.ID, input.UserIDs) + if err != nil { + return err + } + } + return err + }) + + return result, err +} + +func (r *Rotation) TimeZone(ctx context.Context, rot *rotation.Rotation) (string, error) { + return rot.Start.Location().String(), nil +} + +func (r *Rotation) NextHandoffTimes(ctx context.Context, rot *rotation.Rotation, num *int) ([]time.Time, error) { + var n int + if num != nil { + n = *num + } else { + count, err := r.RotationStore.FindParticipantCount(ctx, rot.ID) + if err != nil { + return nil, errors.Wrap(err, "retrieving participant count") + } + if count > 50 { + // setting to max limit for validation + 
n = 50 + } else { + n = count + } + } + + err := validate.Range("num", n, 0, 50) + if err != nil { + return nil, err + } + + s, err := r.RotationStore.State(ctx, rot.ID) + if err == rotation.ErrNoState { + return nil, nil + } + if err != nil { + return nil, err + } + + result := make([]time.Time, n) + t := s.ShiftStart + for i := range result { + t = rot.EndTime(t) + result[i] = t + } + + return result, nil +} + +func (r *Rotation) UserIDs(ctx context.Context, rot *rotation.Rotation) ([]string, error) { + parts, err := r.RotationStore.FindAllParticipants(ctx, rot.ID) + if err != nil { + return nil, err + } + + ids := make([]string, len(parts)) + for i, p := range parts { + ids[i] = p.Target.TargetID() + } + + return ids, nil +} + +func (r *Rotation) Users(ctx context.Context, rot *rotation.Rotation) ([]user.User, error) { + userIDs, err := r.UserIDs(ctx, rot) + if err != nil { + return nil, err + } + + users := make([]user.User, len(userIDs)) + errCh := make(chan error, len(userIDs)) + for i := range userIDs { + // TODO: does this need to be bounded? + // The max number = the max number of unique users in the current rotation, + // which can be bounded by the config_limit participants_per_rotation (but isn't by default). 
+ go func(idx int) { + u, err := (*App)(r).FindOneUser(ctx, userIDs[idx]) + if err == nil { + users[idx] = *u + } + errCh <- err + }(i) + } + + for range userIDs { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-errCh: + if err != nil { + return nil, err + } + } + } + + return users, nil +} + +func (r *Rotation) ActiveUserIndex(ctx context.Context, obj *rotation.Rotation) (int, error) { + s, err := r.RotationStore.State(ctx, obj.ID) + if err == rotation.ErrNoState { + return -1, nil + } + if err != nil { + return -1, err + } + return s.Position, err +} + +func (q *Query) Rotations(ctx context.Context, opts *graphql2.RotationSearchOptions) (conn *graphql2.RotationConnection, err error) { + if opts == nil { + opts = &graphql2.RotationSearchOptions{} + } + + var searchOpts rotation.SearchOptions + if opts.Search != nil { + searchOpts.Search = *opts.Search + } + searchOpts.Omit = opts.Omit + if opts.After != nil && *opts.After != "" { + err = search.ParseCursor(*opts.After, &searchOpts) + if err != nil { + return nil, err + } + } + if opts.First != nil { + searchOpts.Limit = *opts.First + } + if searchOpts.Limit == 0 { + searchOpts.Limit = 15 + } + + searchOpts.Limit++ + rots, err := q.RotationStore.Search(ctx, &searchOpts) + if err != nil { + return nil, err + } + conn = new(graphql2.RotationConnection) + if len(rots) == searchOpts.Limit { + rots = rots[:len(rots)-1] + conn.PageInfo.HasNextPage = true + } + if len(rots) > 0 { + last := rots[len(rots)-1] + searchOpts.After.Name = last.Name + + cur, err := search.Cursor(searchOpts) + if err != nil { + return conn, err + } + conn.PageInfo.EndCursor = &cur + } + conn.Nodes = rots + return conn, err +} + +func (m *Mutation) updateRotationParticipants(ctx context.Context, tx *sql.Tx, rotationID string, userIDs []string, updateActive bool) (err error) { + // Get current participants + currentParticipants, err := m.RotationStore.FindAllParticipantsTx(ctx, tx, rotationID) + if err != nil { + return err 
+ } + + var participantIDsToRemove []string + + for i, c := range currentParticipants { + if i >= len(userIDs) { + participantIDsToRemove = append(participantIDsToRemove, c.ID) + continue + } + + if c.Target.TargetID() == userIDs[i] { + // nothing to update + continue + } + + // Update + err = m.RotationStore.UpdateParticipantUserIDTx(ctx, tx, c.ID, userIDs[i]) + if err != nil { + return err + } + } + + if len(userIDs) > len(currentParticipants) { + // Add users + err = m.RotationStore.AddRotationUsersTx(ctx, tx, rotationID, userIDs[len(currentParticipants):]) + if err != nil { + return err + } + } + + if len(participantIDsToRemove) == 0 { + return nil + } + + if len(userIDs) == 0 { + // Delete rotation state if all users are going to be deleted as per new input + err = m.RotationStore.DeleteStateTx(ctx, tx, rotationID) + if err != nil { + return err + } + } else if updateActive { + // get current active participant + s, err := m.RotationStore.StateTx(ctx, tx, rotationID) + if err == rotation.ErrNoState { + return nil + } + if err != nil { + return err + } + + // if currently active user is going to be deleted + // then set to first user before we actually delete any users + if s.Position >= len(userIDs) { + err = m.RotationStore.SetActiveIndexTx(ctx, tx, rotationID, 0) + if err != nil { + return err + } + } + } + + err = m.RotationStore.DeleteRotationParticipantsTx(ctx, tx, participantIDsToRemove) + if err != nil { + return err + } + return nil + +} + +func (m *Mutation) UpdateRotation(ctx context.Context, input graphql2.UpdateRotationInput) (res bool, err error) { + err = withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + result, err := m.RotationStore.FindRotationForUpdateTx(ctx, tx, input.ID) + if err != nil { + return err + } + var update bool + if input.Name != nil { + update = true + result.Name = *input.Name + } + if input.Description != nil { + update = true + result.Description = *input.Description + } + if input.Start != nil { + 
update = true + result.Start = *input.Start + } + if input.Type != nil { + update = true + result.Type = *input.Type + } + if input.ShiftLength != nil { + update = true + result.ShiftLength = *input.ShiftLength + } + + if input.TimeZone != nil { + update = true + loc, err := util.LoadLocation(*input.TimeZone) + if err != nil { + return validation.NewFieldError("TimeZone", "invalid TimeZone: "+err.Error()) + } + result.Start = result.Start.In(loc) + } + + if update { + err = m.RotationStore.UpdateRotationTx(ctx, tx, result) + if err != nil { + + return err + } + } + + if input.UserIDs != nil { + err = m.updateRotationParticipants(ctx, tx, input.ID, input.UserIDs, input.ActiveUserIndex == nil) + if err != nil { + return err + } + } + + // Update active participant (in rotation state) if specified by input + // This should be applicable regardless of whether or not 'UserIDs' as an input has been specified. + if input.ActiveUserIndex != nil { + err = m.RotationStore.SetActiveIndexTx(ctx, tx, input.ID, *input.ActiveUserIndex) + if err != nil { + return err + } + } + + return err + }) + + if err != nil { + return false, err + } + return true, nil +} diff --git a/graphql2/graphqlapp/schedule.go b/graphql2/graphqlapp/schedule.go new file mode 100644 index 0000000000..bb68325016 --- /dev/null +++ b/graphql2/graphqlapp/schedule.go @@ -0,0 +1,250 @@ +package graphqlapp + +import ( + context "context" + "database/sql" + "fmt" + "github.com/target/goalert/assignment" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/oncall" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/search" + "github.com/target/goalert/util" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "sort" + "strconv" + "strings" + "time" +) + +type Schedule App + +func (a *App) Schedule() graphql2.ScheduleResolver { return (*Schedule)(a) } + +func (q *Query) 
Schedule(ctx context.Context, id string) (*schedule.Schedule, error) { + return (*App)(q).FindOneSchedule(ctx, id) +} +func (s *Schedule) Shifts(ctx context.Context, raw *schedule.Schedule, start, end time.Time) ([]oncall.Shift, error) { + if end.Before(start) { + return nil, validation.NewFieldError("EndTime", "must be after StartTime") + } + if end.After(start.AddDate(0, 0, 50)) { + return nil, validation.NewFieldError("EndTime", "cannot be more than 50 days past StartTime") + } + return s.OnCallStore.HistoryBySchedule(ctx, raw.ID, start, end) +} + +func (s *Schedule) Target(ctx context.Context, raw *schedule.Schedule, input assignment.RawTarget) (*graphql2.ScheduleTarget, error) { + rules, err := s.RuleStore.FindByTargetTx(ctx, nil, raw.ID, input) + if err != nil { + return nil, err + } + + return &graphql2.ScheduleTarget{ + ScheduleID: raw.ID, + Target: input, + Rules: rules, + }, nil +} + +func (s *Schedule) Targets(ctx context.Context, raw *schedule.Schedule) ([]graphql2.ScheduleTarget, error) { + rules, err := s.RuleStore.FindAll(ctx, raw.ID) + if err != nil { + return nil, err + } + + m := make(map[assignment.RawTarget][]rule.Rule) + for _, r := range rules { + tgt := assignment.RawTarget{ID: r.Target.TargetID(), Type: r.Target.TargetType()} + m[tgt] = append(m[tgt], r) + } + + result := make([]graphql2.ScheduleTarget, 0, len(m)) + for tgt, rules := range m { + result = append(result, graphql2.ScheduleTarget{ + Target: tgt, + ScheduleID: raw.ID, + Rules: rules, + }) + } + + return result, nil +} +func (s *Schedule) AssignedTo(ctx context.Context, raw *schedule.Schedule) ([]assignment.RawTarget, error) { + pols, err := s.PolicyStore.FindAllPoliciesBySchedule(ctx, raw.ID) + if err != nil { + return nil, err + } + sort.Slice(pols, func(i, j int) bool { return strings.ToLower(pols[i].Name) < strings.ToLower(pols[j].Name) }) + + tgt := make([]assignment.RawTarget, len(pols)) + for i, p := range pols { + tgt[i] = assignment.RawTarget{ + ID: p.ID, + Name: p.Name, 
+ Type: assignment.TargetTypeEscalationPolicy, + } + } + + return tgt, nil +} +func (m *Mutation) UpdateSchedule(ctx context.Context, input graphql2.UpdateScheduleInput) (ok bool, err error) { + var loc *time.Location + if input.TimeZone != nil { + loc, err = util.LoadLocation(*input.TimeZone) + if err != nil { + return false, validation.NewFieldError("timeZone", err.Error()) + } + } + err = withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + sched, err := m.ScheduleStore.FindOneForUpdate(ctx, tx, input.ID) + if err != nil { + return err + } + if input.Name != nil { + sched.Name = *input.Name + } + if input.Description != nil { + sched.Description = *input.Description + } + + if loc != nil { + sched.TimeZone = loc + } + + return m.ScheduleStore.UpdateTx(ctx, tx, sched) + }) + + return err == nil, err +} + +func (m *Mutation) CreateSchedule(ctx context.Context, input graphql2.CreateScheduleInput) (sched *schedule.Schedule, err error) { + usedTargets := make(map[assignment.RawTarget]int, len(input.Targets)) + + for i, tgt := range input.Targets { + fieldPrefix := fmt.Sprintf("targets[%d].", i) + + // validating both are not nil + if tgt.NewRotation == nil && tgt.Target == nil { + return nil, validate.Many( + validation.NewFieldError(fieldPrefix+"target", "one of `target` or `newRotation` is required"), + validation.NewFieldError(fieldPrefix+"newRotation", "one of `target` or `newRotation` is required"), + ) + } + + // validating only one is present + if tgt.NewRotation != nil && tgt.Target != nil { + return nil, validate.Many( + validation.NewFieldError(fieldPrefix+"target", "cannot be used with `newRotation`"), + validation.NewFieldError(fieldPrefix+"newRotation", "cannot be used with `target`"), + ) + } + + // checking for duplicate targets + if tgt.Target != nil { + raw := assignment.NewRawTarget(tgt.Target) + if oldIndex, ok := usedTargets[raw]; ok { + return nil, validation.NewFieldError(fieldPrefix+"target", fmt.Sprintf("must be unique. 
Conflicts with existing `targets[%d].target`.", oldIndex)) + } + usedTargets[raw] = i + } + + } + + loc, err := util.LoadLocation(input.TimeZone) + if err != nil { + return nil, validation.NewFieldError("timeZone", err.Error()) + } + + err = withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + s := &schedule.Schedule{ + Name: input.Name, + TimeZone: loc, + } + if input.Description != nil { + s.Description = *input.Description + } + sched, err = m.ScheduleStore.CreateScheduleTx(ctx, tx, s) + if err != nil { + return err + } + + for i := range input.Targets { + if input.Targets[i].NewRotation == nil { + continue + } + rot, err := m.CreateRotation(ctx, *input.Targets[i].NewRotation) + if err != nil { + return validation.AddPrefix("targets["+strconv.Itoa(i)+"].newRotation.", err) + } + // Inserting newly created rotation as 'target' with it's corresponding rules + input.Targets[i].Target = &assignment.RawTarget{Type: assignment.TargetTypeRotation, ID: rot.ID, Name: rot.Name} + + } + + for i, r := range input.Targets { + r.ScheduleID = &sched.ID + _, err = m.UpdateScheduleTarget(ctx, r) + if err != nil { + return validation.AddPrefix("targets["+strconv.Itoa(i)+"].", err) + } + } + + return nil + }) + + return sched, err +} + +func (r *Schedule) TimeZone(ctx context.Context, data *schedule.Schedule) (string, error) { + return data.TimeZone.String(), nil +} + +func (q *Query) Schedules(ctx context.Context, opts *graphql2.ScheduleSearchOptions) (conn *graphql2.ScheduleConnection, err error) { + if opts == nil { + opts = &graphql2.ScheduleSearchOptions{} + } + + var searchOpts schedule.SearchOptions + if opts.Search != nil { + searchOpts.Search = *opts.Search + } + searchOpts.Omit = opts.Omit + if opts.After != nil && *opts.After != "" { + err = search.ParseCursor(*opts.After, &searchOpts) + if err != nil { + return nil, err + } + } + if opts.First != nil { + searchOpts.Limit = *opts.First + } + if searchOpts.Limit == 0 { + searchOpts.Limit = 15 + } + + 
searchOpts.Limit++ + scheds, err := q.ScheduleStore.Search(ctx, &searchOpts) + if err != nil { + return nil, err + } + conn = new(graphql2.ScheduleConnection) + if len(scheds) == searchOpts.Limit { + scheds = scheds[:len(scheds)-1] + conn.PageInfo.HasNextPage = true + } + if len(scheds) > 0 { + last := scheds[len(scheds)-1] + searchOpts.After.Name = last.Name + + cur, err := search.Cursor(searchOpts) + if err != nil { + return conn, err + } + conn.PageInfo.EndCursor = &cur + } + conn.Nodes = scheds + return conn, err +} diff --git a/graphql2/graphqlapp/schedulerule.go b/graphql2/graphqlapp/schedulerule.go new file mode 100644 index 0000000000..ac614db300 --- /dev/null +++ b/graphql2/graphqlapp/schedulerule.go @@ -0,0 +1,99 @@ +package graphqlapp + +import ( + context "context" + "database/sql" + "github.com/target/goalert/assignment" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/permission" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/validation" + "strconv" + + "github.com/pkg/errors" +) + +type ScheduleRule App + +func (a *App) ScheduleRule() graphql2.ScheduleRuleResolver { return (*ScheduleRule)(a) } +func (r *ScheduleRule) Target(ctx context.Context, raw *rule.Rule) (*assignment.RawTarget, error) { + tgt := assignment.NewRawTarget(raw.Target) + return &tgt, nil +} +func (r *ScheduleRule) WeekdayFilter(ctx context.Context, raw *rule.Rule) ([]bool, error) { + var f [7]bool + for i, v := range raw.WeekdayFilter { + f[i] = v == 1 + } + return f[:], nil +} + +func (m *Mutation) UpdateScheduleTarget(ctx context.Context, input graphql2.ScheduleTargetInput) (bool, error) { + var schedID string + if input.ScheduleID != nil { + schedID = *input.ScheduleID + } + if input.Target.Type == assignment.TargetTypeUser && input.Target.ID == "__current_user" { + input.Target.ID = permission.UserID(ctx) + } + err := withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + _, err := 
m.ScheduleStore.FindOneForUpdate(ctx, tx, schedID) // lock schedule + if err != nil { + return errors.Wrap(err, "lock schedule") + } + + rules, err := m.RuleStore.FindByTargetTx(ctx, tx, schedID, input.Target) + if err != nil { + return errors.Wrap(err, "fetch existing rules") + } + rulesByID := make(map[string]*rule.Rule, len(rules)) + for i := range rules { + rulesByID[rules[i].ID] = &rules[i] + } + + updated := make(map[string]bool, len(rules)) + for i, inputRule := range input.Rules { + r := rule.NewAlwaysActive(schedID, input.Target) + if inputRule.ID != nil { + // doing an update + if rulesByID[*inputRule.ID] == nil { + return validation.NewFieldError("rules["+strconv.Itoa(i)+"]", "does not exist") + } + r = rulesByID[*inputRule.ID] + } + if inputRule.Start != nil { + r.Start = *inputRule.Start + } + if inputRule.End != nil { + r.End = *inputRule.End + } + for i, v := range inputRule.WeekdayFilter { + if !v { + r.WeekdayFilter[i] = 0 + } + } + + if inputRule.ID != nil { + updated[*inputRule.ID] = true + err = errors.Wrap(m.RuleStore.UpdateTx(ctx, tx, r), "update rule") + } else { + _, err = m.RuleStore.CreateRuleTx(ctx, tx, r) + err = errors.Wrap(err, "create rule") + } + if err != nil { + return err + } + } + + toDelete := make([]string, 0, len(rules)-len(updated)) + for _, rule := range rules { + if updated[rule.ID] { + continue + } + toDelete = append(toDelete, rule.ID) + } + + return errors.Wrap(m.RuleStore.DeleteManyTx(ctx, tx, toDelete), "delete old rules") + }) + return err == nil, err +} diff --git a/graphql2/graphqlapp/service.go b/graphql2/graphqlapp/service.go new file mode 100644 index 0000000000..2f556479bd --- /dev/null +++ b/graphql2/graphqlapp/service.go @@ -0,0 +1,205 @@ +package graphqlapp + +import ( + context "context" + "database/sql" + "github.com/target/goalert/assignment" + "github.com/target/goalert/escalation" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/integrationkey" + 
"github.com/target/goalert/label" + "github.com/target/goalert/oncall" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/service" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "strconv" +) + +const tempUUID = "00000000-0000-0000-0000-000000000000" + +type Service App + +func (a *App) Service() graphql2.ServiceResolver { return (*Service)(a) } + +func (q *Query) Service(ctx context.Context, id string) (*service.Service, error) { + return (*App)(q).FindOneService(ctx, id) +} +func (q *Query) Services(ctx context.Context, opts *graphql2.ServiceSearchOptions) (conn *graphql2.ServiceConnection, err error) { + if opts == nil { + opts = &graphql2.ServiceSearchOptions{} + } + + var searchOpts service.SearchOptions + searchOpts.FavoritesUserID = permission.UserID(ctx) + if opts.Search != nil { + searchOpts.Search = *opts.Search + } + if opts.FavoritesOnly != nil { + searchOpts.FavoritesOnly = *opts.FavoritesOnly + } + if opts.FavoritesFirst != nil { + searchOpts.FavoritesFirst = *opts.FavoritesFirst + } + searchOpts.Omit = opts.Omit + if opts.After != nil && *opts.After != "" { + err = search.ParseCursor(*opts.After, &searchOpts) + if err != nil { + return nil, err + } + } + if opts.First != nil { + searchOpts.Limit = *opts.First + } + if searchOpts.Limit == 0 { + searchOpts.Limit = 15 + } + + searchOpts.Limit++ + svcs, err := q.ServiceStore.Search(ctx, &searchOpts) + if err != nil { + return nil, err + } + conn = new(graphql2.ServiceConnection) + if len(svcs) == searchOpts.Limit { + svcs = svcs[:len(svcs)-1] + conn.PageInfo.HasNextPage = true + } + if len(svcs) > 0 { + last := svcs[len(svcs)-1] + searchOpts.After.IsFavorite = last.IsUserFavorite() + searchOpts.After.Name = last.Name + + cur, err := search.Cursor(searchOpts) + if err != nil { + return conn, err + } + conn.PageInfo.EndCursor = &cur + } + conn.Nodes = svcs + return conn, err +} + +func 
(s *Service) Labels(ctx context.Context, raw *service.Service) ([]label.Label, error) { + return s.LabelStore.FindAllByService(ctx, raw.ID) +} + +func (s *Service) EscalationPolicy(ctx context.Context, raw *service.Service) (*escalation.Policy, error) { + return (*App)(s).FindOnePolicy(ctx, raw.EscalationPolicyID) +} +func (s *Service) IsFavorite(ctx context.Context, raw *service.Service) (bool, error) { + return raw.IsUserFavorite(), nil +} +func (s *Service) OnCallUsers(ctx context.Context, raw *service.Service) ([]oncall.ServiceOnCallUser, error) { + return s.OnCallStore.OnCallUsersByService(ctx, raw.ID) +} +func (s *Service) IntegrationKeys(ctx context.Context, raw *service.Service) ([]integrationkey.IntegrationKey, error) { + return s.IntKeyStore.FindAllByService(ctx, raw.ID) +} + +func (m *Mutation) CreateService(ctx context.Context, input graphql2.CreateServiceInput) (result *service.Service, err error) { + if input.NewEscalationPolicy != nil && input.EscalationPolicyID != nil && *input.EscalationPolicyID != "" { + return nil, validation.NewFieldError("newEscalationPolicy", "cannot be used with `escalationPolicyID`.") + } + + err = withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + svc := &service.Service{ + Name: input.Name, + } + if input.EscalationPolicyID != nil { + svc.EscalationPolicyID = *input.EscalationPolicyID + } + if input.Description != nil { + svc.Description = *input.Description + } + if input.NewEscalationPolicy != nil { + // Set tempUUID so that Normalize won't fail on the yet-to-be-created + // escalation policy. + // + // We want to fail on service validation errors before attempting to + // create the nested policy. 
+ svc.EscalationPolicyID = tempUUID + } + _, err := svc.Normalize() + if err != nil { + return err + } + + if input.NewEscalationPolicy != nil { + ep, err := m.CreateEscalationPolicy(ctx, *input.NewEscalationPolicy) + if err != nil { + return validation.AddPrefix("newEscalationPolicy.", err) + } + svc.EscalationPolicyID = ep.ID + } + + result, err = m.ServiceStore.CreateServiceTx(ctx, tx, svc) + if err != nil { + return err + } + + err = validate.Many( + validate.Range("NewIntegrationKeys", len(input.NewIntegrationKeys), 0, 5), + validate.Range("Labels", len(input.Labels), 0, 5), + ) + if err != nil { + return err + } + + for i, key := range input.NewIntegrationKeys { + key.ServiceID = &result.ID + _, err = m.CreateIntegrationKey(ctx, key) + if err != nil { + return validation.AddPrefix("newIntegrationKeys["+strconv.Itoa(i)+"].", err) + } + } + + for i, lbl := range input.Labels { + lbl.Target = &assignment.RawTarget{Type: assignment.TargetTypeService, ID: result.ID} + _, err = m.SetLabel(ctx, lbl) + if err != nil { + return validation.AddPrefix("labels["+strconv.Itoa(i)+"].", err) + } + } + + return err + }) + + return result, err +} + +func (a *Mutation) UpdateService(ctx context.Context, input graphql2.UpdateServiceInput) (bool, error) { + tx, err := a.DB.BeginTx(ctx, nil) + if err != nil { + return false, err + } + defer tx.Rollback() + + svc, err := a.ServiceStore.FindOneForUpdate(ctx, tx, input.ID) + if err != nil { + return false, err + } + + if input.Name != nil { + svc.Name = *input.Name + } + if input.Description != nil { + svc.Description = *input.Description + } + if input.EscalationPolicyID != nil { + svc.EscalationPolicyID = *input.EscalationPolicyID + } + + err = a.ServiceStore.UpdateTx(ctx, tx, svc) + if err != nil { + return false, nil + } + + err = tx.Commit() + if err != nil { + return false, err + } + + return true, nil +} diff --git a/graphql2/graphqlapp/slack.go b/graphql2/graphqlapp/slack.go new file mode 100644 index 0000000000..d15ef8a6a8 
--- /dev/null +++ b/graphql2/graphqlapp/slack.go @@ -0,0 +1,95 @@ +package graphqlapp + +import ( + context "context" + "sort" + "strings" + + "github.com/target/goalert/graphql2" + "github.com/target/goalert/notification/slack" + "github.com/target/goalert/search" +) + +func (q *Query) SlackChannel(ctx context.Context, id string) (*slack.Channel, error) { + return q.SlackStore.Channel(ctx, id) +} + +func (q *Query) SlackChannels(ctx context.Context, input *graphql2.SlackChannelSearchOptions) (conn *graphql2.SlackChannelConnection, err error) { + if input == nil { + input = &graphql2.SlackChannelSearchOptions{} + } + + var searchOpts struct { + Search string `json:"s,omitempty"` + Omit []string `json:"m,omitempty"` + After struct { + Name string `json:"n,omitempty"` + } `json:"a,omitempty"` + } + searchOpts.Omit = input.Omit + if input.Search != nil { + searchOpts.Search = *input.Search + } + if input.After != nil && *input.After != "" { + err = search.ParseCursor(*input.After, &searchOpts) + if err != nil { + return nil, err + } + } + + limit := 15 + if input.First != nil { + limit = *input.First + } + + channels, err := q.SlackStore.ListChannels(ctx) + if err != nil { + return nil, err + } + // Sort by name, case-insensitive, then sensitive. + sort.Slice(channels, func(i, j int) bool { + iName, jName := strings.ToLower(channels[i].Name), strings.ToLower(channels[j].Name) + + if iName != jName { + return iName < jName + } + return channels[i].Name < channels[j].Name + }) + + // No DB search, so we manually filter for the cursor and search strings. 
+ s := strings.ToLower(searchOpts.Search) + n := strings.ToLower(searchOpts.After.Name) + filtered := channels[:0] + for _, ch := range channels { + chName := strings.ToLower(ch.Name) + if !strings.Contains(chName, s) { + continue + } + if n != "" && chName <= n { + continue + } + if contains(searchOpts.Omit, ch.ID) { + continue + } + filtered = append(filtered, ch) + } + channels = filtered + + conn = new(graphql2.SlackChannelConnection) + if len(channels) > limit { + channels = channels[:limit] + conn.PageInfo.HasNextPage = true + } + + if len(channels) > 0 { + searchOpts.After.Name = channels[len(channels)-1].Name + cur, err := search.Cursor(searchOpts) + if err != nil { + return conn, err + } + conn.PageInfo.EndCursor = &cur + } + + conn.Nodes = channels + return conn, err +} diff --git a/graphql2/graphqlapp/target.go b/graphql2/graphqlapp/target.go new file mode 100644 index 0000000000..a3fe353c76 --- /dev/null +++ b/graphql2/graphqlapp/target.go @@ -0,0 +1,54 @@ +package graphqlapp + +import ( + context "context" + "github.com/target/goalert/assignment" + "github.com/target/goalert/graphql2" + + "github.com/pkg/errors" +) + +type Target App + +func (a *App) Target() graphql2.TargetResolver { return (*Target)(a) } + +func (t *Target) Name(ctx context.Context, raw *assignment.RawTarget) (*string, error) { + if raw.Name != "" { + return &raw.Name, nil + } + switch raw.Type { + case assignment.TargetTypeRotation: + r, err := (*App)(t).FindOneRotation(ctx, raw.ID) + if err != nil { + return nil, err + } + return &r.Name, nil + case assignment.TargetTypeUser: + u, err := (*App)(t).FindOneUser(ctx, raw.ID) + if err != nil { + return nil, err + } + return &u.Name, nil + case assignment.TargetTypeEscalationPolicy: + ep, err := (*App)(t).FindOnePolicy(ctx, raw.ID) + if err != nil { + return nil, err + } + return &ep.Name, nil + case assignment.TargetTypeSchedule: + sched, err := (*App)(t).FindOneSchedule(ctx, raw.ID) + if err != nil { + return nil, err + } + 
return &sched.Name, nil + case assignment.TargetTypeService: + svc, err := (*App)(t).FindOneService(ctx, raw.ID) + if err != nil { + return nil, err + } + return &svc.Name, nil + + } + + return nil, errors.New("unhandled target type") +} diff --git a/graphql2/graphqlapp/timezone.go b/graphql2/graphqlapp/timezone.go new file mode 100644 index 0000000000..15b087bb7d --- /dev/null +++ b/graphql2/graphqlapp/timezone.go @@ -0,0 +1,60 @@ +package graphqlapp + +import ( + context "context" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/search" + "github.com/target/goalert/timezone" +) + +func (q *Query) TimeZones(ctx context.Context, input *graphql2.TimeZoneSearchOptions) (conn *graphql2.TimeZoneConnection, err error) { + if input == nil { + input = &graphql2.TimeZoneSearchOptions{} + } + + var searchOpts timezone.SearchOptions + if input.Search != nil { + searchOpts.Search = *input.Search + } + searchOpts.Omit = input.Omit + + if input.After != nil && *input.After != "" { + err = search.ParseCursor(*input.After, &searchOpts) + if err != nil { + return nil, err + } + } + if input.First != nil { + searchOpts.Limit = *input.First + } + if searchOpts.Limit == 0 { + searchOpts.Limit = 15 + } + + searchOpts.Limit++ + names, err := q.TimeZoneStore.Search(ctx, &searchOpts) + if err != nil { + return nil, err + } + + conn = new(graphql2.TimeZoneConnection) + if len(names) == searchOpts.Limit { + names = names[:len(names)-1] + conn.PageInfo.HasNextPage = true + } + if len(names) > 0 { + last := names[len(names)-1] + searchOpts.After.Name = last + + cur, err := search.Cursor(searchOpts) + if err != nil { + return conn, err + } + conn.PageInfo.EndCursor = &cur + } + conn.Nodes = make([]graphql2.TimeZone, len(names)) + for i, n := range names { + conn.Nodes[i].ID = n + } + return conn, err +} diff --git a/graphql2/graphqlapp/tx.go b/graphql2/graphqlapp/tx.go new file mode 100644 index 0000000000..d304a6c152 --- /dev/null +++ b/graphql2/graphqlapp/tx.go 
@@ -0,0 +1,47 @@ +package graphqlapp + +import ( + context "context" + "database/sql" + "github.com/target/goalert/util/errutil" +) + +// withContextTx is a helper function that handles starting and using a single transaction for a request. +// +// If there is not already a nested transaction in the current context, one is started and a new +// context is passed to fn. +// The transaction is then given to the provided fn. +// +// Commit and Rollback are handled automatically. +// Any nested calls to `withContextTx` will inherit the original transaction from the new context. +func withContextTx(ctx context.Context, db *sql.DB, fn func(context.Context, *sql.Tx) error) error { + // Defining a static key to store the transaction within Context. The `0` value is arbitrary, + // it just needs to be a unique type/value pair, vs. other context values. + type ctxTx int + const txKey = ctxTx(0) + + run := func() error { + if tx, ok := ctx.Value(txKey).(*sql.Tx); ok { + // Transaction already exists, run fn with + // the original context, and pass in the open + // Tx. + return fn(ctx, tx) + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + err = fn(context.WithValue(ctx, txKey, tx), tx) + if err != nil { + return err + } + + return tx.Commit() + } + + // Ensure returned DB errors are mapped. 
+ return errutil.MapDBError(run()) +} diff --git a/graphql2/graphqlapp/user.go b/graphql2/graphqlapp/user.go new file mode 100644 index 0000000000..05bf990312 --- /dev/null +++ b/graphql2/graphqlapp/user.go @@ -0,0 +1,129 @@ +package graphqlapp + +import ( + context "context" + "database/sql" + + "github.com/pkg/errors" + "github.com/target/goalert/escalation" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/user" + "github.com/target/goalert/user/contactmethod" + "github.com/target/goalert/user/notificationrule" +) + +type User App + +func (a *App) User() graphql2.UserResolver { return (*User)(a) } + +func (a *User) AuthSubjects(ctx context.Context, obj *user.User) ([]user.AuthSubject, error) { + return a.UserStore.FindAllAuthSubjectsForUser(ctx, obj.ID) +} +func (a *User) Role(ctx context.Context, usr *user.User) (graphql2.UserRole, error) { + return graphql2.UserRole(usr.Role), nil +} + +func (a *User) ContactMethods(ctx context.Context, obj *user.User) ([]contactmethod.ContactMethod, error) { + return a.CMStore.FindAll(ctx, obj.ID) +} +func (a *User) NotificationRules(ctx context.Context, obj *user.User) ([]notificationrule.NotificationRule, error) { + return a.NRStore.FindAll(ctx, obj.ID) +} + +func (a *User) OnCallSteps(ctx context.Context, obj *user.User) ([]escalation.Step, error) { + return a.PolicyStore.FindAllOnCallStepsForUserTx(ctx, nil, obj.ID) +} + +func (a *Mutation) DeleteUser(ctx context.Context, id string) (bool, error) { + err := a.UserStore.Delete(ctx, id) + if err != nil { + return false, err + } + return true, nil +} +func (a *Mutation) UpdateUser(ctx context.Context, input graphql2.UpdateUserInput) (bool, error) { + err := withContextTx(ctx, a.DB, func(ctx context.Context, tx *sql.Tx) error { + usr, err := a.UserStore.FindOneTx(ctx, tx, input.ID, true) + if err != nil { + return err + } + if input.Name != nil { + usr.Name 
= *input.Name + } + if input.Role != nil { + usr.Role = permission.Role(*input.Role) + } + if input.Email != nil { + usr.Email = *input.Email + } + if input.StatusUpdateContactMethodID != nil { + usr.AlertStatusCMID = *input.StatusUpdateContactMethodID + } + return a.UserStore.UpdateTx(ctx, tx, usr) + }) + return err == nil, err +} + +func (q *Query) Users(ctx context.Context, opts *graphql2.UserSearchOptions, first *int, after, searchStr *string) (conn *graphql2.UserConnection, err error) { + if opts == nil { + opts = &graphql2.UserSearchOptions{ + First: first, + After: after, + Search: searchStr, + } + } + + var searchOpts user.SearchOptions + if opts.Search != nil { + searchOpts.Search = *opts.Search + } + searchOpts.Omit = opts.Omit + if opts.After != nil && *opts.After != "" { + err = search.ParseCursor(*opts.After, &searchOpts) + if err != nil { + return nil, errors.Wrap(err, "parse cursor") + } + } + if opts.First != nil { + searchOpts.Limit = *opts.First + } + if searchOpts.Limit == 0 { + searchOpts.Limit = 15 + } + + searchOpts.Limit++ + users, err := q.UserStore.Search(ctx, &searchOpts) + if err != nil { + return nil, err + } + + conn = new(graphql2.UserConnection) + if len(users) == searchOpts.Limit { + users = users[:len(users)-1] + conn.PageInfo.HasNextPage = true + } + if len(users) > 0 { + last := users[len(users)-1] + searchOpts.After.Name = last.Name + + cur, err := search.Cursor(searchOpts) + if err != nil { + return conn, err + } + conn.PageInfo.EndCursor = &cur + } + conn.Nodes = users + return conn, err +} + +func (a *Query) User(ctx context.Context, id *string) (*user.User, error) { + var userID string + if id != nil { + userID = *id + } else { + userID = permission.UserID(ctx) + } + return (*App)(a).FindOneUser(ctx, userID) +} diff --git a/graphql2/graphqlapp/useroverride.go b/graphql2/graphqlapp/useroverride.go new file mode 100644 index 0000000000..31709d3a10 --- /dev/null +++ b/graphql2/graphqlapp/useroverride.go @@ -0,0 +1,143 @@ 
+package graphqlapp + +import ( + context "context" + "database/sql" + "github.com/target/goalert/assignment" + "github.com/target/goalert/graphql2" + "github.com/target/goalert/override" + "github.com/target/goalert/search" + "github.com/target/goalert/user" +) + +type UserOverride App + +func (a *App) UserOverride() graphql2.UserOverrideResolver { return (*UserOverride)(a) } +func (q *Query) UserOverride(ctx context.Context, id string) (*override.UserOverride, error) { + return q.OverrideStore.FindOneUserOverrideTx(ctx, nil, id, false) +} + +func (m *Mutation) UpdateUserOverride(ctx context.Context, input graphql2.UpdateUserOverrideInput) (bool, error) { + err := withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + u, err := m.OverrideStore.FindOneUserOverrideTx(ctx, tx, input.ID, true) + if err != nil { + return err + } + + if input.Start != nil { + u.Start = *input.Start + } + if input.End != nil { + u.End = *input.End + } + if input.AddUserID != nil { + u.AddUserID = *input.AddUserID + } + if input.RemoveUserID != nil { + u.RemoveUserID = *input.RemoveUserID + } + + return m.OverrideStore.UpdateUserOverrideTx(ctx, tx, u) + }) + if err != nil { + return false, err + } + return true, nil +} + +func (m *Mutation) CreateUserOverride(ctx context.Context, input graphql2.CreateUserOverrideInput) (*override.UserOverride, error) { + u := &override.UserOverride{ + Target: assignment.ScheduleTarget(input.ScheduleID), + Start: input.Start, + End: input.End, + } + if input.AddUserID != nil { + u.AddUserID = *input.AddUserID + } + if input.RemoveUserID != nil { + u.RemoveUserID = *input.RemoveUserID + } + err := withContextTx(ctx, m.DB, func(ctx context.Context, tx *sql.Tx) error { + var err error + u, err = m.OverrideStore.CreateUserOverrideTx(ctx, tx, u) + return err + }) + if err != nil { + return nil, err + } + return u, nil +} +func (u *UserOverride) AddUser(ctx context.Context, raw *override.UserOverride) (*user.User, error) { + if 
raw.AddUserID == "" { + return nil, nil + } + return (*App)(u).FindOneUser(ctx, raw.AddUserID) +} +func (u *UserOverride) RemoveUser(ctx context.Context, raw *override.UserOverride) (*user.User, error) { + if raw.RemoveUserID == "" { + return nil, nil + } + return (*App)(u).FindOneUser(ctx, raw.RemoveUserID) +} +func (u *UserOverride) Target(ctx context.Context, raw *override.UserOverride) (*assignment.RawTarget, error) { + tgt := assignment.NewRawTarget(raw.Target) + return &tgt, nil +} + +func (q *Query) UserOverrides(ctx context.Context, input *graphql2.UserOverrideSearchOptions) (conn *graphql2.UserOverrideConnection, err error) { + if input == nil { + input = &graphql2.UserOverrideSearchOptions{} + } + + var searchOpts override.SearchOptions + searchOpts.Omit = input.Omit + if input.After != nil && *input.After != "" { + err = search.ParseCursor(*input.After, &searchOpts) + if err != nil { + return nil, err + } + } else { + searchOpts.AddUserIDs = input.FilterAddUserID + searchOpts.RemoveUserIDs = input.FilterRemoveUserID + searchOpts.AnyUserIDs = input.FilterAnyUserID + if input.ScheduleID != nil { + searchOpts.ScheduleID = *input.ScheduleID + } + if input.Start != nil { + searchOpts.Start = *input.Start + } + if input.End != nil { + searchOpts.End = *input.End + } + } + if input.First != nil { + searchOpts.Limit = *input.First + } + if searchOpts.Limit == 0 { + searchOpts.Limit = 15 + } + + searchOpts.Limit++ + overrides, err := q.OverrideStore.Search(ctx, &searchOpts) + if err != nil { + return nil, err + } + + conn = new(graphql2.UserOverrideConnection) + if len(overrides) == searchOpts.Limit { + overrides = overrides[:len(overrides)-1] + conn.PageInfo.HasNextPage = true + } + if len(overrides) > 0 { + last := overrides[len(overrides)-1] + searchOpts.After.ID = last.ID + + cur, err := search.Cursor(searchOpts) + if err != nil { + return conn, err + } + conn.PageInfo.EndCursor = &cur + } + conn.Nodes = overrides + return conn, err +} diff --git 
a/graphql2/isotimestamp.go b/graphql2/isotimestamp.go new file mode 100644 index 0000000000..3a9bc8f373 --- /dev/null +++ b/graphql2/isotimestamp.go @@ -0,0 +1,29 @@ +package graphql2 + +import ( + io "io" + "strings" + "time" + + graphql "github.com/99designs/gqlgen/graphql" + "github.com/pkg/errors" +) + +func MarshalISOTimestamp(t time.Time) graphql.Marshaler { + return graphql.WriterFunc(func(w io.Writer) { + if t.IsZero() { + io.WriteString(w, "null") + return + } + io.WriteString(w, `"`+t.Format(time.RFC3339Nano)+`"`) + }) +} +func UnmarshalISOTimestamp(v interface{}) (time.Time, error) { + str, ok := v.(string) + if !ok { + return time.Time{}, errors.New("timestamps must be strings") + } + str = strings.Trim(str, `"`) + + return time.Parse(time.RFC3339Nano, str) +} diff --git a/graphql2/mapconfig.go b/graphql2/mapconfig.go new file mode 100644 index 0000000000..4ddd3908c0 --- /dev/null +++ b/graphql2/mapconfig.go @@ -0,0 +1,198 @@ +// Code generated by devtools/configparams DO NOT EDIT. + +package graphql2 + +import ( + "fmt" + "strings" + + "github.com/target/goalert/config" + "github.com/target/goalert/validation" +) + +// MapConfigValues will map a Config struct into a flat list of ConfigValue structs. 
+func MapConfigValues(cfg config.Config) []ConfigValue { + return []ConfigValue{ + {ID: "General.PublicURL", Type: ConfigTypeString, Description: "Publicly routable URL for UI links and API calls.", Value: cfg.General.PublicURL}, + {ID: "General.GoogleAnalyticsID", Type: ConfigTypeString, Description: "", Value: cfg.General.GoogleAnalyticsID}, + {ID: "General.NotificationDisclaimer", Type: ConfigTypeString, Description: "Disclaimer text for receiving pre-recorded notifications (appears on profile page).", Value: cfg.General.NotificationDisclaimer}, + {ID: "General.DisableLabelCreation", Type: ConfigTypeBoolean, Description: "Disables the ability to create new labels for services.", Value: fmt.Sprintf("%t", cfg.General.DisableLabelCreation)}, + {ID: "Auth.RefererURLs", Type: ConfigTypeStringList, Description: "Allowed referer URLs for auth and redirects.", Value: strings.Join(cfg.Auth.RefererURLs, "\n")}, + {ID: "Auth.DisableBasic", Type: ConfigTypeBoolean, Description: "Disallow username/password login.", Value: fmt.Sprintf("%t", cfg.Auth.DisableBasic)}, + {ID: "GitHub.Enable", Type: ConfigTypeBoolean, Description: "Enable GitHub authentication.", Value: fmt.Sprintf("%t", cfg.GitHub.Enable)}, + {ID: "GitHub.NewUsers", Type: ConfigTypeBoolean, Description: "Allow new user creation via GitHub authentication.", Value: fmt.Sprintf("%t", cfg.GitHub.NewUsers)}, + {ID: "GitHub.ClientID", Type: ConfigTypeString, Description: "", Value: cfg.GitHub.ClientID}, + {ID: "GitHub.ClientSecret", Type: ConfigTypeString, Description: "", Value: cfg.GitHub.ClientSecret, Password: true}, + {ID: "GitHub.AllowedUsers", Type: ConfigTypeStringList, Description: "Allow any of the listed GitHub usernames to authenticate. 
Use '*' to allow any user.", Value: strings.Join(cfg.GitHub.AllowedUsers, "\n")}, + {ID: "GitHub.AllowedOrgs", Type: ConfigTypeStringList, Description: "Allow any member of any listed GitHub org (or team, using the format 'org/team') to authenticate.", Value: strings.Join(cfg.GitHub.AllowedOrgs, "\n")}, + {ID: "GitHub.EnterpriseURL", Type: ConfigTypeString, Description: "GitHub URL (without /api) when used with GitHub Enterprise.", Value: cfg.GitHub.EnterpriseURL}, + {ID: "OIDC.Enable", Type: ConfigTypeBoolean, Description: "Enable OpenID Connect authentication.", Value: fmt.Sprintf("%t", cfg.OIDC.Enable)}, + {ID: "OIDC.NewUsers", Type: ConfigTypeBoolean, Description: "Allow new user creation via OIDC authentication.", Value: fmt.Sprintf("%t", cfg.OIDC.NewUsers)}, + {ID: "OIDC.OverrideName", Type: ConfigTypeString, Description: "Set the name/label on the login page to something other than OIDC.", Value: cfg.OIDC.OverrideName}, + {ID: "OIDC.IssuerURL", Type: ConfigTypeString, Description: "", Value: cfg.OIDC.IssuerURL}, + {ID: "OIDC.ClientID", Type: ConfigTypeString, Description: "", Value: cfg.OIDC.ClientID}, + {ID: "OIDC.ClientSecret", Type: ConfigTypeString, Description: "", Value: cfg.OIDC.ClientSecret, Password: true}, + {ID: "Mailgun.Enable", Type: ConfigTypeBoolean, Description: "", Value: fmt.Sprintf("%t", cfg.Mailgun.Enable)}, + {ID: "Mailgun.APIKey", Type: ConfigTypeString, Description: "", Value: cfg.Mailgun.APIKey, Password: true}, + {ID: "Mailgun.EmailDomain", Type: ConfigTypeString, Description: "The TO address for all incoming alerts.", Value: cfg.Mailgun.EmailDomain}, + {ID: "Slack.Enable", Type: ConfigTypeBoolean, Description: "", Value: fmt.Sprintf("%t", cfg.Slack.Enable)}, + {ID: "Slack.ClientID", Type: ConfigTypeString, Description: "", Value: cfg.Slack.ClientID}, + {ID: "Slack.ClientSecret", Type: ConfigTypeString, Description: "", Value: cfg.Slack.ClientSecret, Password: true}, + {ID: "Slack.AccessToken", Type: ConfigTypeString, Description: 
"Slack app OAuth access token.", Value: cfg.Slack.AccessToken, Password: true}, + {ID: "Twilio.Enable", Type: ConfigTypeBoolean, Description: "Enables sending and processing of Voice and SMS messages through the Twilio notification provider.", Value: fmt.Sprintf("%t", cfg.Twilio.Enable)}, + {ID: "Twilio.AccountSID", Type: ConfigTypeString, Description: "", Value: cfg.Twilio.AccountSID}, + {ID: "Twilio.AuthToken", Type: ConfigTypeString, Description: "The primary Auth Token for Twilio. Must be primary (not secondary) for request valiation.", Value: cfg.Twilio.AuthToken, Password: true}, + {ID: "Twilio.FromNumber", Type: ConfigTypeString, Description: "The Twilio number to use for outgoing notifications.", Value: cfg.Twilio.FromNumber}, + {ID: "Feedback.Enable", Type: ConfigTypeBoolean, Description: "Enables Feedback link in nav bar.", Value: fmt.Sprintf("%t", cfg.Feedback.Enable)}, + {ID: "Feedback.OverrideURL", Type: ConfigTypeString, Description: "Use a custom URL for Feedback link in nav bar.", Value: cfg.Feedback.OverrideURL}, + } +} + +// MapPublicConfigValues will map a Config struct into a flat list of ConfigValue structs. 
+func MapPublicConfigValues(cfg config.Config) []ConfigValue { + return []ConfigValue{ + {ID: "General.GoogleAnalyticsID", Type: ConfigTypeString, Description: "", Value: cfg.General.GoogleAnalyticsID}, + {ID: "General.NotificationDisclaimer", Type: ConfigTypeString, Description: "Disclaimer text for receiving pre-recorded notifications (appears on profile page).", Value: cfg.General.NotificationDisclaimer}, + {ID: "General.DisableLabelCreation", Type: ConfigTypeBoolean, Description: "Disables the ability to create new labels for services.", Value: fmt.Sprintf("%t", cfg.General.DisableLabelCreation)}, + {ID: "Auth.DisableBasic", Type: ConfigTypeBoolean, Description: "Disallow username/password login.", Value: fmt.Sprintf("%t", cfg.Auth.DisableBasic)}, + {ID: "GitHub.Enable", Type: ConfigTypeBoolean, Description: "Enable GitHub authentication.", Value: fmt.Sprintf("%t", cfg.GitHub.Enable)}, + {ID: "OIDC.Enable", Type: ConfigTypeBoolean, Description: "Enable OpenID Connect authentication.", Value: fmt.Sprintf("%t", cfg.OIDC.Enable)}, + {ID: "Mailgun.Enable", Type: ConfigTypeBoolean, Description: "", Value: fmt.Sprintf("%t", cfg.Mailgun.Enable)}, + {ID: "Slack.Enable", Type: ConfigTypeBoolean, Description: "", Value: fmt.Sprintf("%t", cfg.Slack.Enable)}, + {ID: "Twilio.Enable", Type: ConfigTypeBoolean, Description: "Enables sending and processing of Voice and SMS messages through the Twilio notification provider.", Value: fmt.Sprintf("%t", cfg.Twilio.Enable)}, + {ID: "Twilio.FromNumber", Type: ConfigTypeString, Description: "The Twilio number to use for outgoing notifications.", Value: cfg.Twilio.FromNumber}, + {ID: "Feedback.Enable", Type: ConfigTypeBoolean, Description: "Enables Feedback link in nav bar.", Value: fmt.Sprintf("%t", cfg.Feedback.Enable)}, + {ID: "Feedback.OverrideURL", Type: ConfigTypeString, Description: "Use a custom URL for Feedback link in nav bar.", Value: cfg.Feedback.OverrideURL}, + } +} + +// ApplyConfigValues will apply a list of ConfigValues 
to a Config struct. +func ApplyConfigValues(cfg config.Config, vals []ConfigValueInput) (config.Config, error) { + parseStringList := func(v string) []string { + if v == "" { + return nil + } + return strings.Split(v, "\n") + } + parseBool := func(id, v string) (bool, error) { + switch v { + case "true": + return true, nil + case "false": + return false, nil + default: + return false, validation.NewFieldError("\""+id+"\".Value", "boolean value invalid: expected 'true' or 'false'") + } + } + for _, v := range vals { + switch v.ID { + case "General.PublicURL": + cfg.General.PublicURL = v.Value + case "General.GoogleAnalyticsID": + cfg.General.GoogleAnalyticsID = v.Value + case "General.NotificationDisclaimer": + cfg.General.NotificationDisclaimer = v.Value + case "General.DisableLabelCreation": + val, err := parseBool(v.ID, v.Value) + if err != nil { + return cfg, err + } + cfg.General.DisableLabelCreation = val + case "Auth.RefererURLs": + cfg.Auth.RefererURLs = parseStringList(v.Value) + case "Auth.DisableBasic": + val, err := parseBool(v.ID, v.Value) + if err != nil { + return cfg, err + } + cfg.Auth.DisableBasic = val + case "GitHub.Enable": + val, err := parseBool(v.ID, v.Value) + if err != nil { + return cfg, err + } + cfg.GitHub.Enable = val + case "GitHub.NewUsers": + val, err := parseBool(v.ID, v.Value) + if err != nil { + return cfg, err + } + cfg.GitHub.NewUsers = val + case "GitHub.ClientID": + cfg.GitHub.ClientID = v.Value + case "GitHub.ClientSecret": + cfg.GitHub.ClientSecret = v.Value + case "GitHub.AllowedUsers": + cfg.GitHub.AllowedUsers = parseStringList(v.Value) + case "GitHub.AllowedOrgs": + cfg.GitHub.AllowedOrgs = parseStringList(v.Value) + case "GitHub.EnterpriseURL": + cfg.GitHub.EnterpriseURL = v.Value + case "OIDC.Enable": + val, err := parseBool(v.ID, v.Value) + if err != nil { + return cfg, err + } + cfg.OIDC.Enable = val + case "OIDC.NewUsers": + val, err := parseBool(v.ID, v.Value) + if err != nil { + return cfg, err + } + 
cfg.OIDC.NewUsers = val + case "OIDC.OverrideName": + cfg.OIDC.OverrideName = v.Value + case "OIDC.IssuerURL": + cfg.OIDC.IssuerURL = v.Value + case "OIDC.ClientID": + cfg.OIDC.ClientID = v.Value + case "OIDC.ClientSecret": + cfg.OIDC.ClientSecret = v.Value + case "Mailgun.Enable": + val, err := parseBool(v.ID, v.Value) + if err != nil { + return cfg, err + } + cfg.Mailgun.Enable = val + case "Mailgun.APIKey": + cfg.Mailgun.APIKey = v.Value + case "Mailgun.EmailDomain": + cfg.Mailgun.EmailDomain = v.Value + case "Slack.Enable": + val, err := parseBool(v.ID, v.Value) + if err != nil { + return cfg, err + } + cfg.Slack.Enable = val + case "Slack.ClientID": + cfg.Slack.ClientID = v.Value + case "Slack.ClientSecret": + cfg.Slack.ClientSecret = v.Value + case "Slack.AccessToken": + cfg.Slack.AccessToken = v.Value + case "Twilio.Enable": + val, err := parseBool(v.ID, v.Value) + if err != nil { + return cfg, err + } + cfg.Twilio.Enable = val + case "Twilio.AccountSID": + cfg.Twilio.AccountSID = v.Value + case "Twilio.AuthToken": + cfg.Twilio.AuthToken = v.Value + case "Twilio.FromNumber": + cfg.Twilio.FromNumber = v.Value + case "Feedback.Enable": + val, err := parseBool(v.ID, v.Value) + if err != nil { + return cfg, err + } + cfg.Feedback.Enable = val + case "Feedback.OverrideURL": + cfg.Feedback.OverrideURL = v.Value + default: + return cfg, validation.NewFieldError("ID", fmt.Sprintf("unknown config ID '%s'", v.ID)) + } + } + return cfg, nil +} diff --git a/graphql2/models_gen.go b/graphql2/models_gen.go new file mode 100644 index 0000000000..eb10f07f6d --- /dev/null +++ b/graphql2/models_gen.go @@ -0,0 +1,522 @@ +// Code generated by github.com/99designs/gqlgen, DO NOT EDIT. 
+ +package graphql2 + +import ( + "fmt" + "io" + "strconv" + "time" + + "github.com/target/goalert/alert" + "github.com/target/goalert/assignment" + "github.com/target/goalert/escalation" + "github.com/target/goalert/label" + "github.com/target/goalert/notification/slack" + "github.com/target/goalert/override" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/service" + "github.com/target/goalert/user" + "github.com/target/goalert/user/contactmethod" +) + +type AlertConnection struct { + Nodes []alert.Alert `json:"nodes"` + PageInfo PageInfo `json:"pageInfo"` +} + +type AlertSearchOptions struct { + FilterByStatus []AlertStatus `json:"filterByStatus"` + FilterByServiceID []string `json:"filterByServiceID"` + Search *string `json:"search"` + First *int `json:"first"` + After *string `json:"after"` + FavoritesOnly *bool `json:"favoritesOnly"` + Omit []int `json:"omit"` +} + +type AuthSubjectConnection struct { + Nodes []user.AuthSubject `json:"nodes"` + PageInfo PageInfo `json:"pageInfo"` +} + +type ConfigValue struct { + ID string `json:"id"` + Description string `json:"description"` + Value string `json:"value"` + Type ConfigType `json:"type"` + Password bool `json:"password"` +} + +type ConfigValueInput struct { + ID string `json:"id"` + Value string `json:"value"` +} + +type CreateEscalationPolicyInput struct { + Name string `json:"name"` + Description *string `json:"description"` + Repeat *int `json:"repeat"` + Steps []CreateEscalationPolicyStepInput `json:"steps"` +} + +type CreateEscalationPolicyStepInput struct { + EscalationPolicyID *string `json:"escalationPolicyID"` + DelayMinutes int `json:"delayMinutes"` + Targets []assignment.RawTarget `json:"targets"` + NewRotation *CreateRotationInput `json:"newRotation"` + NewSchedule *CreateScheduleInput `json:"newSchedule"` +} + +type CreateIntegrationKeyInput struct { + 
ServiceID *string `json:"serviceID"` + Type IntegrationKeyType `json:"type"` + Name string `json:"name"` +} + +type CreateRotationInput struct { + Name string `json:"name"` + Description *string `json:"description"` + TimeZone string `json:"timeZone"` + Start time.Time `json:"start"` + Type rotation.Type `json:"type"` + ShiftLength *int `json:"shiftLength"` + UserIDs []string `json:"userIDs"` +} + +type CreateScheduleInput struct { + Name string `json:"name"` + Description *string `json:"description"` + TimeZone string `json:"timeZone"` + Targets []ScheduleTargetInput `json:"targets"` +} + +type CreateServiceInput struct { + Name string `json:"name"` + Description *string `json:"description"` + EscalationPolicyID *string `json:"escalationPolicyID"` + NewEscalationPolicy *CreateEscalationPolicyInput `json:"newEscalationPolicy"` + NewIntegrationKeys []CreateIntegrationKeyInput `json:"newIntegrationKeys"` + Labels []SetLabelInput `json:"labels"` +} + +type CreateUserContactMethodInput struct { + UserID string `json:"userID"` + Type contactmethod.Type `json:"type"` + Name string `json:"name"` + Value string `json:"value"` + NewUserNotificationRule *CreateUserNotificationRuleInput `json:"newUserNotificationRule"` +} + +type CreateUserNotificationRuleInput struct { + UserID *string `json:"userID"` + ContactMethodID *string `json:"contactMethodID"` + DelayMinutes int `json:"delayMinutes"` +} + +type CreateUserOverrideInput struct { + ScheduleID string `json:"scheduleID"` + Start time.Time `json:"start"` + End time.Time `json:"end"` + AddUserID *string `json:"addUserID"` + RemoveUserID *string `json:"removeUserID"` +} + +type EscalationPolicyConnection struct { + Nodes []escalation.Policy `json:"nodes"` + PageInfo PageInfo `json:"pageInfo"` +} + +type EscalationPolicySearchOptions struct { + First *int `json:"first"` + After *string `json:"after"` + Search *string `json:"search"` + Omit []string `json:"omit"` +} + +type LabelConnection struct { + Nodes []label.Label 
`json:"nodes"` + PageInfo PageInfo `json:"pageInfo"` +} + +type LabelSearchOptions struct { + First *int `json:"first"` + After *string `json:"after"` + Search *string `json:"search"` + UniqueKeys *bool `json:"uniqueKeys"` + Omit []string `json:"omit"` +} + +type PageInfo struct { + EndCursor *string `json:"endCursor"` + HasNextPage bool `json:"hasNextPage"` +} + +type RotationConnection struct { + Nodes []rotation.Rotation `json:"nodes"` + PageInfo PageInfo `json:"pageInfo"` +} + +type RotationSearchOptions struct { + First *int `json:"first"` + After *string `json:"after"` + Search *string `json:"search"` + Omit []string `json:"omit"` +} + +type ScheduleConnection struct { + Nodes []schedule.Schedule `json:"nodes"` + PageInfo PageInfo `json:"pageInfo"` +} + +type ScheduleRuleInput struct { + ID *string `json:"id"` + Start *rule.Clock `json:"start"` + End *rule.Clock `json:"end"` + WeekdayFilter []bool `json:"weekdayFilter"` +} + +type ScheduleSearchOptions struct { + First *int `json:"first"` + After *string `json:"after"` + Search *string `json:"search"` + Omit []string `json:"omit"` +} + +type ScheduleTarget struct { + ScheduleID string `json:"scheduleID"` + Target assignment.RawTarget `json:"target"` + Rules []rule.Rule `json:"rules"` +} + +type ScheduleTargetInput struct { + ScheduleID *string `json:"scheduleID"` + Target *assignment.RawTarget `json:"target"` + NewRotation *CreateRotationInput `json:"newRotation"` + Rules []ScheduleRuleInput `json:"rules"` +} + +type ServiceConnection struct { + Nodes []service.Service `json:"nodes"` + PageInfo PageInfo `json:"pageInfo"` +} + +type ServiceSearchOptions struct { + First *int `json:"first"` + After *string `json:"after"` + Search *string `json:"search"` + Omit []string `json:"omit"` + FavoritesOnly *bool `json:"favoritesOnly"` + FavoritesFirst *bool `json:"favoritesFirst"` +} + +type SetFavoriteInput struct { + Target assignment.RawTarget `json:"target"` + Favorite bool `json:"favorite"` +} + +type 
SetLabelInput struct { + Target *assignment.RawTarget `json:"target"` + Key string `json:"key"` + Value string `json:"value"` +} + +type SlackChannelConnection struct { + Nodes []slack.Channel `json:"nodes"` + PageInfo PageInfo `json:"pageInfo"` +} + +type SlackChannelSearchOptions struct { + First *int `json:"first"` + After *string `json:"after"` + Search *string `json:"search"` + Omit []string `json:"omit"` +} + +type TimeZone struct { + ID string `json:"id"` +} + +type TimeZoneConnection struct { + Nodes []TimeZone `json:"nodes"` + PageInfo PageInfo `json:"pageInfo"` +} + +type TimeZoneSearchOptions struct { + First *int `json:"first"` + After *string `json:"after"` + Search *string `json:"search"` + Omit []string `json:"omit"` +} + +type UpdateAlertsInput struct { + AlertIDs []int `json:"alertIDs"` + NewStatus AlertStatus `json:"newStatus"` +} + +type UpdateEscalationPolicyInput struct { + ID string `json:"id"` + Name *string `json:"name"` + Description *string `json:"description"` + Repeat *int `json:"repeat"` + StepIDs []string `json:"stepIDs"` +} + +type UpdateEscalationPolicyStepInput struct { + ID string `json:"id"` + DelayMinutes *int `json:"delayMinutes"` + Targets []assignment.RawTarget `json:"targets"` +} + +type UpdateRotationInput struct { + ID string `json:"id"` + Name *string `json:"name"` + Description *string `json:"description"` + TimeZone *string `json:"timeZone"` + Start *time.Time `json:"start"` + Type *rotation.Type `json:"type"` + ShiftLength *int `json:"shiftLength"` + ActiveUserIndex *int `json:"activeUserIndex"` + UserIDs []string `json:"userIDs"` +} + +type UpdateScheduleInput struct { + ID string `json:"id"` + Name *string `json:"name"` + Description *string `json:"description"` + TimeZone *string `json:"timeZone"` +} + +type UpdateServiceInput struct { + ID string `json:"id"` + Name *string `json:"name"` + Description *string `json:"description"` + EscalationPolicyID *string `json:"escalationPolicyID"` +} + +type 
UpdateUserContactMethodInput struct { + ID string `json:"id"` + Name *string `json:"name"` + Value *string `json:"value"` +} + +type UpdateUserInput struct { + ID string `json:"id"` + Name *string `json:"name"` + Email *string `json:"email"` + Role *UserRole `json:"role"` + StatusUpdateContactMethodID *string `json:"statusUpdateContactMethodID"` +} + +type UpdateUserOverrideInput struct { + ID string `json:"id"` + Start *time.Time `json:"start"` + End *time.Time `json:"end"` + AddUserID *string `json:"addUserID"` + RemoveUserID *string `json:"removeUserID"` +} + +type UserConnection struct { + Nodes []user.User `json:"nodes"` + PageInfo PageInfo `json:"pageInfo"` +} + +type UserOverrideConnection struct { + Nodes []override.UserOverride `json:"nodes"` + PageInfo PageInfo `json:"pageInfo"` +} + +type UserOverrideSearchOptions struct { + First *int `json:"first"` + After *string `json:"after"` + Omit []string `json:"omit"` + ScheduleID *string `json:"scheduleID"` + FilterAddUserID []string `json:"filterAddUserID"` + FilterRemoveUserID []string `json:"filterRemoveUserID"` + FilterAnyUserID []string `json:"filterAnyUserID"` + Start *time.Time `json:"start"` + End *time.Time `json:"end"` +} + +type UserSearchOptions struct { + First *int `json:"first"` + After *string `json:"after"` + Search *string `json:"search"` + Omit []string `json:"omit"` +} + +type AlertStatus string + +const ( + AlertStatusStatusAcknowledged AlertStatus = "StatusAcknowledged" + AlertStatusStatusClosed AlertStatus = "StatusClosed" + AlertStatusStatusUnacknowledged AlertStatus = "StatusUnacknowledged" +) + +var AllAlertStatus = []AlertStatus{ + AlertStatusStatusAcknowledged, + AlertStatusStatusClosed, + AlertStatusStatusUnacknowledged, +} + +func (e AlertStatus) IsValid() bool { + switch e { + case AlertStatusStatusAcknowledged, AlertStatusStatusClosed, AlertStatusStatusUnacknowledged: + return true + } + return false +} + +func (e AlertStatus) String() string { + return string(e) +} + +func (e 
*AlertStatus) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = AlertStatus(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid AlertStatus", str) + } + return nil +} + +func (e AlertStatus) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +type ConfigType string + +const ( + ConfigTypeString ConfigType = "string" + ConfigTypeStringList ConfigType = "stringList" + ConfigTypeInteger ConfigType = "integer" + ConfigTypeBoolean ConfigType = "boolean" +) + +var AllConfigType = []ConfigType{ + ConfigTypeString, + ConfigTypeStringList, + ConfigTypeInteger, + ConfigTypeBoolean, +} + +func (e ConfigType) IsValid() bool { + switch e { + case ConfigTypeString, ConfigTypeStringList, ConfigTypeInteger, ConfigTypeBoolean: + return true + } + return false +} + +func (e ConfigType) String() string { + return string(e) +} + +func (e *ConfigType) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = ConfigType(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid ConfigType", str) + } + return nil +} + +func (e ConfigType) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +type IntegrationKeyType string + +const ( + IntegrationKeyTypeGeneric IntegrationKeyType = "generic" + IntegrationKeyTypeGrafana IntegrationKeyType = "grafana" + IntegrationKeyTypeEmail IntegrationKeyType = "email" +) + +var AllIntegrationKeyType = []IntegrationKeyType{ + IntegrationKeyTypeGeneric, + IntegrationKeyTypeGrafana, + IntegrationKeyTypeEmail, +} + +func (e IntegrationKeyType) IsValid() bool { + switch e { + case IntegrationKeyTypeGeneric, IntegrationKeyTypeGrafana, IntegrationKeyTypeEmail: + return true + } + return false +} + +func (e IntegrationKeyType) String() string { + return string(e) +} + +func (e *IntegrationKeyType) UnmarshalGQL(v interface{}) error { + str, ok := 
v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = IntegrationKeyType(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid IntegrationKeyType", str) + } + return nil +} + +func (e IntegrationKeyType) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +type UserRole string + +const ( + UserRoleUnknown UserRole = "unknown" + UserRoleUser UserRole = "user" + UserRoleAdmin UserRole = "admin" +) + +var AllUserRole = []UserRole{ + UserRoleUnknown, + UserRoleUser, + UserRoleAdmin, +} + +func (e UserRole) IsValid() bool { + switch e { + case UserRoleUnknown, UserRoleUser, UserRoleAdmin: + return true + } + return false +} + +func (e UserRole) String() string { + return string(e) +} + +func (e *UserRole) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = UserRole(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid UserRole", str) + } + return nil +} + +func (e UserRole) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} diff --git a/graphql2/schema.graphql b/graphql2/schema.graphql new file mode 100644 index 0000000000..0964ba9043 --- /dev/null +++ b/graphql2/schema.graphql @@ -0,0 +1,755 @@ +type Query { + # Returns the user with the given ID. If no ID is specified, + # the current user is implied. + user(id: ID): User + + # Returns a list of users who's name or email match search string. + users( + input: UserSearchOptions + first: Int = 15 + after: String = "" + search: String = "" + ): UserConnection! + + # Returns a single alert with the given ID. + alert(id: Int!): Alert + + # Returns a paginated list of alerts. + alerts(input: AlertSearchOptions): AlertConnection! + + # Returns a single service with the given ID. + service(id: ID!): Service + + # Returns a single integration key with the given ID. + integrationKey(id: ID!): IntegrationKey + + # Returns a paginated list of services. 
+ services(input: ServiceSearchOptions): ServiceConnection! + + # Returns a single rotation with the given ID. + rotation(id: ID!): Rotation + + # Returns a paginated list of rotations. + rotations(input: RotationSearchOptions): RotationConnection! + + # Returns a single schedule with the given ID. + schedule(id: ID!): Schedule + + # Returns a paginated list of schedules. + schedules(input: ScheduleSearchOptions): ScheduleConnection! + + # Returns a single escalation policy with the given ID. + escalationPolicy(id: ID!): EscalationPolicy + + # Returns a paginated list of escalation policies. + escalationPolicies( + input: EscalationPolicySearchOptions + ): EscalationPolicyConnection! + + # Returns the list of auth subjects for the given provider ID. + authSubjectsForProvider( + first: Int = 15 + after: String = "" + providerID: ID! + ): AuthSubjectConnection! + + # Returns a paginated list of time zones. + timeZones(input: TimeZoneSearchOptions): TimeZoneConnection! + + # Allows searching for assigned labels. + labels(input: LabelSearchOptions): LabelConnection! + + # Allows searching for user overrides. + userOverrides(input: UserOverrideSearchOptions): UserOverrideConnection! + + # Returns a single user override with the given ID. + userOverride(id: ID!): UserOverride + + # Returns public server configuration values. If all is set to true, + # then all values are returned (must be admin). + config(all: Boolean): [ConfigValue!]! + + # Returns a contact method with the given ID. + userContactMethod(id: ID!): UserContactMethod + + # Returns the list of Slack channels available to the current user. + slackChannels(input: SlackChannelSearchOptions): SlackChannelConnection! + + # Returns a Slack channel with the given ID. + slackChannel(id: ID!): SlackChannel +} + +input SlackChannelSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +type SlackChannel { + id: ID! + name: String! 
+} + +type SlackChannelConnection { + nodes: [SlackChannel!]! + pageInfo: PageInfo! +} + +type ConfigValue { + id: String! + description: String! + value: String! + type: ConfigType! + password: Boolean! +} +enum ConfigType { + string + stringList + integer + boolean +} + +input UserOverrideSearchOptions { + first: Int = 15 + after: String = "" + omit: [ID!] + + scheduleID: ID # limit search to a single schedule + filterAddUserID: [ID!] # only return overrides where the provided users have been added to a schedule (add or replace types). + filterRemoveUserID: [ID!] # only return overrides where the provided users have been removed from a schedule (remove or replace types). + filterAnyUserID: [ID!] # only return overrides that add/remove/replace at least one of the provided user IDs. + start: ISOTimestamp # start of the window to search for. + end: ISOTimestamp # end of the window to search for. +} + +type UserOverrideConnection { + nodes: [UserOverride!]! + pageInfo: PageInfo! +} +type UserOverride { + id: ID! + + start: ISOTimestamp! + end: ISOTimestamp! + + addUserID: ID! + removeUserID: ID! + + addUser: User + removeUser: User + + target: Target! +} +input LabelSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + uniqueKeys: Boolean = false + omit: [ID!] +} + +type LabelConnection { + nodes: [Label!]! + pageInfo: PageInfo! +} + +type Mutation { + addAuthSubject(input: AuthSubjectInput!): Boolean! + deleteAuthSubject(input: AuthSubjectInput!): Boolean! + updateUser(input: UpdateUserInput!): Boolean! + + testContactMethod(id: ID!): Boolean! + + # Updates the status for multiple alerts given the list of alertIDs and the status they want to be updated to. + updateAlerts(input: UpdateAlertsInput!): [Alert!] + + # Updates the fields for a rotation given the rotationID, also updates ordering of and number of users for the rotation. + updateRotation(input: UpdateRotationInput!): Boolean! 
+ + # Escalates multiple alerts given the list of alertIDs. + escalateAlerts(input: [Int!]): [Alert!] + + # Updates the favorite status of a target. + setFavorite(input: SetFavoriteInput!): Boolean! + + updateService(input: UpdateServiceInput!): Boolean! + updateEscalationPolicy(input: UpdateEscalationPolicyInput!): Boolean! + updateEscalationPolicyStep(input: UpdateEscalationPolicyStepInput!): Boolean! + + deleteAll(input: [TargetInput!]): Boolean! + + createService(input: CreateServiceInput!): Service + createEscalationPolicy(input: CreateEscalationPolicyInput!): EscalationPolicy + createEscalationPolicyStep( + input: CreateEscalationPolicyStepInput! + ): EscalationPolicyStep + createRotation(input: CreateRotationInput!): Rotation + + createIntegrationKey(input: CreateIntegrationKeyInput!): IntegrationKey + + setLabel(input: SetLabelInput!): Boolean! + + createSchedule(input: CreateScheduleInput!): Schedule + updateScheduleTarget(input: ScheduleTargetInput!): Boolean! + createUserOverride(input: CreateUserOverrideInput!): UserOverride + + createUserContactMethod( + input: CreateUserContactMethodInput! + ): UserContactMethod + createUserNotificationRule( + input: CreateUserNotificationRuleInput! + ): UserNotificationRule + updateUserContactMethod(input: UpdateUserContactMethodInput!): Boolean! + + updateSchedule(input: UpdateScheduleInput!): Boolean! + updateUserOverride(input: UpdateUserOverrideInput!): Boolean! + + setConfig(input: [ConfigValueInput!]): Boolean! +} + +input ConfigValueInput { + id: String! + value: String! +} + +input UpdateUserOverrideInput { + id: ID! + + start: ISOTimestamp + end: ISOTimestamp + + addUserID: ID + removeUserID: ID +} + +input CreateUserOverrideInput { + scheduleID: ID! + + start: ISOTimestamp! + end: ISOTimestamp! + + addUserID: ID + removeUserID: ID +} + +input CreateScheduleInput { + name: String! + description: String + timeZone: String! + + targets: [ScheduleTargetInput!] 
+} + +input ScheduleTargetInput { + scheduleID: ID + target: TargetInput + newRotation: CreateRotationInput + rules: [ScheduleRuleInput!]! +} + +input ScheduleRuleInput { + id: ID + start: ClockTime + end: ClockTime + + # weekdayFilter is a 7-item array that indicates if the rule + # is active on each weekday, starting with Sunday. + weekdayFilter: [Boolean!] +} + +input SetLabelInput { + target: TargetInput + key: String! + + # If value is empty, the label is removed. + value: String! +} + +input TimeZoneSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +type TimeZoneConnection { + nodes: [TimeZone!]! + pageInfo: PageInfo! +} + +type TimeZone { + id: String! +} + +input CreateServiceInput { + name: String! + description: String = "" + + escalationPolicyID: ID + newEscalationPolicy: CreateEscalationPolicyInput + newIntegrationKeys: [CreateIntegrationKeyInput!] + labels: [SetLabelInput!] +} + +input CreateEscalationPolicyInput { + name: String! + description: String = "" + repeat: Int = 3 + + steps: [CreateEscalationPolicyStepInput!] +} + +input CreateEscalationPolicyStepInput { + escalationPolicyID: ID + + delayMinutes: Int! + + targets: [TargetInput!] + newRotation: CreateRotationInput + newSchedule: CreateScheduleInput +} + +type EscalationPolicyStep { + id: ID! + stepNumber: Int! + delayMinutes: Int! + targets: [Target!]! + escalationPolicy: EscalationPolicy +} + +input UpdateScheduleInput { + id: ID! + name: String + description: String + timeZone: String +} + +input UpdateServiceInput { + id: ID! + name: String + description: String + escalationPolicyID: ID +} + +input UpdateEscalationPolicyInput { + id: ID! + name: String + description: String + repeat: Int + stepIDs: [String!] +} + +input UpdateEscalationPolicyStepInput { + id: ID! + delayMinutes: Int + targets: [TargetInput!] +} + +input SetFavoriteInput { + target: TargetInput! + favorite: Boolean! 
+} + +type EscalationPolicyConnection { + nodes: [EscalationPolicy!]! + pageInfo: PageInfo! +} + +type AlertConnection { + nodes: [Alert!]! + pageInfo: PageInfo! +} + +type ScheduleConnection { + nodes: [Schedule!]! + pageInfo: PageInfo! +} + +type Schedule { + id: ID! + name: String! + description: String! + timeZone: String! + + assignedTo: [Target!]! + shifts(start: ISOTimestamp!, end: ISOTimestamp!): [OnCallShift!]! + + targets: [ScheduleTarget!]! + target(input: TargetInput!): ScheduleTarget +} + +type OnCallShift { + userID: ID! + user: User + start: ISOTimestamp! + end: ISOTimestamp! + truncated: Boolean! +} + +type ScheduleTarget { + scheduleID: ID! + target: Target! + rules: [ScheduleRule!]! +} + +type ScheduleRule { + id: ID! + scheduleID: ID! + + start: ClockTime! + end: ClockTime! + + # weekdayFilter is a 7-item array that indicates if the rule + # is active on each weekday, starting with Sunday. + weekdayFilter: [Boolean!]! + + target: Target! +} + +type RotationConnection { + nodes: [Rotation!]! + pageInfo: PageInfo! +} + +input CreateRotationInput { + name: String! + description: String + + timeZone: String! + start: ISOTimestamp! + + type: RotationType! + shiftLength: Int = 1 + + userIDs: [ID!] +} + +type Rotation { + id: ID! + name: String! + description: String! + + start: ISOTimestamp! + timeZone: String! + + type: RotationType! + shiftLength: Int! + + activeUserIndex: Int! + + userIDs: [ID!]! + users: [User!]! + + nextHandoffTimes(num: Int): [ISOTimestamp!]! +} + +enum RotationType { + weekly + daily + hourly +} + +input UpdateAlertsInput { + # List of alertIDs. + alertIDs: [Int!]! + + newStatus: AlertStatus! +} + +input UpdateRotationInput { + id: ID! + + name: String + description: String + timeZone: String + start: ISOTimestamp + type: RotationType + shiftLength: Int + + activeUserIndex: Int + + # activeUserIndex will not be changed, as the index will remain the same. 
+ # On call user may change since whatever index is put into activeUserIndex will be on call. + userIDs: [ID!] +} + +input RotationSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +input EscalationPolicySearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +input ScheduleSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +input ServiceSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] + + # Include only favorited services in the results. + favoritesOnly: Boolean = false + + # Sort favorite services first. + favoritesFirst: Boolean = false +} + +input UserSearchOptions { + first: Int = 15 + after: String = "" + search: String = "" + omit: [ID!] +} + +input AlertSearchOptions { + filterByStatus: [AlertStatus!] + filterByServiceID: [ID!] + search: String = "" + first: Int = 15 + after: String = "" + favoritesOnly: Boolean = false + omit: [Int!] +} + +# An ISOTimestamp is an RFC3339-formatted timestamp string. +scalar ISOTimestamp + +# ClockTime is a 24-hour time in the format 00:00 +scalar ClockTime + +type Alert { + id: ID! + alertID: Int! + status: AlertStatus! + summary: String! + details: String! + createdAt: ISOTimestamp! + serviceID: ID! + service: Service + + # Escalation Policy State for the alert. + state: AlertState +} + +# The escalation policy state details for the alert. +type AlertState { + lastEscalation: ISOTimestamp! + stepNumber: Int! + repeatCount: Int! +} + +type Service { + id: ID! + name: String! + description: String! + escalationPolicyID: ID! + escalationPolicy: EscalationPolicy + isFavorite: Boolean! + + onCallUsers: [ServiceOnCallUser!]! + integrationKeys: [IntegrationKey!]! + labels: [Label!]! +} + +input CreateIntegrationKeyInput { + serviceID: ID + type: IntegrationKeyType! + name: String! +} + +type Label { + key: String! + value: String! 
+} + +type IntegrationKey { + id: ID! + serviceID: ID! + type: IntegrationKeyType! + name: String! + href: String! +} + +enum IntegrationKeyType { + generic + grafana + email +} + +type ServiceOnCallUser { + userID: ID! + userName: String! + stepNumber: Int! +} + +type EscalationPolicy { + id: ID! + name: String! + description: String! + repeat: Int! + + assignedTo: [Target!]! + steps: [EscalationPolicyStep!]! +} + +# Different Alert Status. +enum AlertStatus { + StatusAcknowledged + StatusClosed + StatusUnacknowledged +} + +type Target { + id: ID! + type: TargetType! + name: String +} + +input TargetInput { + id: ID! + type: TargetType! +} + +enum TargetType { + escalationPolicy + notificationChannel + slackChannel + notificationPolicy + rotation + service + schedule + user + integrationKey + userOverride + notificationRule + contactMethod +} + +type ServiceConnection { + nodes: [Service!]! + pageInfo: PageInfo! +} + +type UserConnection { + nodes: [User!]! + pageInfo: PageInfo! +} + +type AuthSubjectConnection { + nodes: [AuthSubject!]! + pageInfo: PageInfo! +} + +type PageInfo { + endCursor: String + hasNextPage: Boolean! +} + +input UpdateUserInput { + id: ID! + name: String + email: String + role: UserRole + + statusUpdateContactMethodID: ID +} + +input AuthSubjectInput { + userID: ID! + providerID: ID! + subjectID: ID! +} + +enum UserRole { + unknown + user + admin +} + +type User { + id: ID! + + role: UserRole! + + # The user's configured name. + name: String! + + # Email of the user. + email: String! + + contactMethods: [UserContactMethod!]! + notificationRules: [UserNotificationRule!]! + + statusUpdateContactMethodID: ID! + + authSubjects: [AuthSubject!]! + + onCallSteps: [EscalationPolicyStep!]! +} + +type UserNotificationRule { + id: ID! + delayMinutes: Int! + + contactMethodID: ID! + contactMethod: UserContactMethod +} + +enum ContactMethodType { + SMS + VOICE +} + +# A method of contacting a user. +type UserContactMethod { + id: ID! 
+ + type: ContactMethodType + + # User-defined label for this contact method. + name: String! + + value: String! +} + +input CreateUserContactMethodInput { + userID: ID! + + type: ContactMethodType! + name: String! + value: String! + newUserNotificationRule: CreateUserNotificationRuleInput +} + +input CreateUserNotificationRuleInput { + userID: ID + contactMethodID: ID + delayMinutes: Int! +} + +input UpdateUserContactMethodInput { + id: ID! + + name: String + value: String +} + +type AuthSubject { + providerID: ID! + subjectID: ID! + userID: ID! +} diff --git a/heartbeat/monitor.go b/heartbeat/monitor.go new file mode 100644 index 0000000000..d8143d7bd2 --- /dev/null +++ b/heartbeat/monitor.go @@ -0,0 +1,40 @@ +package heartbeat + +import ( + "database/sql" + "github.com/target/goalert/validation/validate" +) + +// A Monitor will generate an alert if it does not receive a heartbeat within the configured IntervalMinutes. +type Monitor struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + ServiceID string `json:"service_id,omitempty"` + IntervalMinutes int `json:"interval_minutes,omitempty"` + + lastState State + lastHeartbeatMinutes sql.NullInt64 +} + +// LastState returns the last known state. +func (m Monitor) LastState() State { return m.lastState } + +// LastHeartbeatMinutes returns the minutes since the heartbeat last reported. +// The interval is truncated, so a value of 0 means "less than 1 minute". +func (m Monitor) LastHeartbeatMinutes() (int, bool) { + return int(m.lastHeartbeatMinutes.Int64), m.lastHeartbeatMinutes.Valid +} + +// Normalize performs validation and returns a new copy. 
+func (m Monitor) Normalize() (*Monitor, error) { + err := validate.Many( + validate.UUID("ServiceID", m.ServiceID), + validate.IDName("Name", m.Name), + validate.Range("IntervalMinutes", m.IntervalMinutes, 1, 9000), + ) + if err != nil { + return nil, err + } + + return &m, nil +} diff --git a/heartbeat/state.go b/heartbeat/state.go new file mode 100644 index 0000000000..a760fa92f7 --- /dev/null +++ b/heartbeat/state.go @@ -0,0 +1,31 @@ +package heartbeat + +import "fmt" + +// State represents the health of a heartbeat monitor. +type State string + +const ( + // StateInactive means the heartbeat has not yet reported for the first time. + StateInactive State = "inactive" + + // StateHealthy indicates a heartbeat was received within the past interval. + StateHealthy State = "healthy" + + // StateUnhealthy indicates a heartbeat has not been received since beyond the interval. + StateUnhealthy State = "unhealthy" +) + +// Scan handles reading State from the DB format +func (r *State) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *r = State(t) + case string: + *r = State(t) + default: + return fmt.Errorf("could not process unknown type for state %T", t) + } + + return nil +} diff --git a/heartbeat/store.go b/heartbeat/store.go new file mode 100644 index 0000000000..9385bec13e --- /dev/null +++ b/heartbeat/store.go @@ -0,0 +1,162 @@ +package heartbeat + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + + uuid "github.com/satori/go.uuid" +) + +// Store manages heartbeat checks and recording heartbeats. +type Store interface { + // Heartbeat records a heartbeat for the given heartbeat ID. + Heartbeat(context.Context, string) error + + // CreateTx creates a new heartbeat check within the transaction. 
+ CreateTx(context.Context, *sql.Tx, *Monitor) (*Monitor, error) + + // Delete deletes the heartbeat check with the given heartbeat ID. + DeleteTx(context.Context, *sql.Tx, string) error + + // FindAllByService returns all heartbeats belonging to the given service ID. + FindAllByService(context.Context, string) ([]Monitor, error) +} + +var _ Store = &DB{} + +// DB implements Store using Postgres as a backend. +type DB struct { + db *sql.DB + + create *sql.Stmt + findAll *sql.Stmt + delete *sql.Stmt + update *sql.Stmt + getSvcID *sql.Stmt + + heartbeat *sql.Stmt +} + +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + return &DB{ + db: db, + + create: p.P(` + insert into heartbeat_monitors ( + id, name, service_id, heartbeat_interval + ) values ($1, $2, $3, ($4||' minutes')::interval) + `), + findAll: p.P(` + select + id, name, extract(epoch from heartbeat_interval)/60, last_state, trunc(extract(epoch from now()-last_heartbeat)/60)::int + from heartbeat_monitors + where service_id = $1 + `), + delete: p.P(` + delete from heartbeat_monitors + where id = $1 + `), + update: p.P(` + update heartbeat_monitors + set + name = $2, + heartbeat_interval = ($3||' minutes')::interval + where id = $1 + `), + getSvcID: p.P(`select service_id from heartbeat_monitors where id = $1`), + + heartbeat: p.P(` + update heartbeat_monitors + set last_heartbeat = now() + where id = $1 + `), + }, p.Err +} + +func (db *DB) CreateTx(ctx context.Context, tx *sql.Tx, m *Monitor) (*Monitor, error) { + err := permission.LimitCheckAny(ctx, permission.User, permission.Admin) + if err != nil { + return nil, err + } + n, err := m.Normalize() + if err != nil { + return nil, err + } + n.ID = uuid.NewV4().String() + + _, err = tx.Stmt(db.create).ExecContext(ctx, n.ID, n.Name, n.ServiceID, n.IntervalMinutes) + n.lastState = StateInactive + return n, err +} +func (db *DB) Heartbeat(ctx context.Context, id string) error { + err := validate.UUID("MonitorID", 
id) + if err != nil { + return err + } + + _, err = db.heartbeat.ExecContext(ctx, id) + return err +} +func (db *DB) DeleteTx(ctx context.Context, tx *sql.Tx, id string) error { + err := permission.LimitCheckAny(ctx, permission.User, permission.Admin) + if err != nil { + return err + } + err = validate.UUID("MonitorID", id) + if err != nil { + return err + } + s := db.delete + if tx != nil { + s = tx.Stmt(s) + } + _, err = s.ExecContext(ctx, id) + return err +} +func (db *DB) Update(ctx context.Context, m *Monitor) error { + err := permission.LimitCheckAny(ctx, permission.User, permission.Admin) + if err != nil { + return err + } + n, err := m.Normalize() + err = validate.Many(err, + validate.UUID("MonitorID", n.ID), + ) + if err != nil { + return err + } + _, err = db.update.ExecContext(ctx, n.ID, n.Name, n.IntervalMinutes) + return err +} +func (db *DB) FindAllByService(ctx context.Context, serviceID string) ([]Monitor, error) { + err := permission.LimitCheckAny(ctx, permission.User, permission.Admin) + if err != nil { + return nil, err + } + err = validate.UUID("ServiceID", serviceID) + if err != nil { + return nil, err + } + rows, err := db.findAll.QueryContext(ctx, serviceID) + if err != nil { + return nil, err + } + defer rows.Close() + + var monitors []Monitor + for rows.Next() { + var m Monitor + m.ServiceID = serviceID + err = rows.Scan(&m.ID, &m.Name, &m.IntervalMinutes, &m.lastState, &m.lastHeartbeatMinutes) + if err != nil { + return nil, err + } + monitors = append(monitors, m) + } + + return monitors, nil +} diff --git a/integrationkey/integrationkey.go b/integrationkey/integrationkey.go new file mode 100644 index 0000000000..44497608a4 --- /dev/null +++ b/integrationkey/integrationkey.go @@ -0,0 +1,25 @@ +package integrationkey + +import ( + "github.com/target/goalert/validation/validate" +) + +type IntegrationKey struct { + ID string `json:"id"` + Name string `json:"name"` + Type Type `json:"type"` + ServiceID string `json:"service_id"` +} + 
+func (i IntegrationKey) Normalize() (*IntegrationKey, error) { + err := validate.Many( + validate.IDName("Name", i.Name), + validate.UUID("ServiceID", i.ServiceID), + validate.OneOf("Type", i.Type, TypeGrafana, TypeGeneric, TypeEmail), + ) + if err != nil { + return nil, err + } + + return &i, nil +} diff --git a/integrationkey/integrationkey_test.go b/integrationkey/integrationkey_test.go new file mode 100644 index 0000000000..9b9bb5bb39 --- /dev/null +++ b/integrationkey/integrationkey_test.go @@ -0,0 +1,36 @@ +package integrationkey + +import ( + "testing" +) + +func TestIntegrationKey_Normalize(t *testing.T) { + test := func(valid bool, k IntegrationKey) { + name := "valid" + if !valid { + name = "invalid" + } + t.Run(name, func(t *testing.T) { + t.Logf("%+v", k) + _, err := k.Normalize() + if valid && err != nil { + t.Errorf("got %v; want nil", err) + } else if !valid && err == nil { + t.Errorf("got nil err; want non-nil") + } + }) + } + + valid := []IntegrationKey{ + {Name: "SampleIntegrationKey", ServiceID: "e93facc0-4764-012d-7bfb-002500d5d1a6", Type: TypeGrafana}, + } + invalid := []IntegrationKey{ + {}, + } + for _, k := range valid { + test(true, k) + } + for _, k := range invalid { + test(false, k) + } +} diff --git a/integrationkey/store.go b/integrationkey/store.go new file mode 100644 index 0000000000..990f20fc4e --- /dev/null +++ b/integrationkey/store.go @@ -0,0 +1,205 @@ +package integrationkey + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + + "github.com/lib/pq" + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" +) + +type Store interface { + Authorize(ctx context.Context, id string, integrationType Type) (context.Context, error) + GetServiceID(ctx context.Context, id string, integrationType Type) (string, error) + Create(ctx context.Context, i 
*IntegrationKey) (*IntegrationKey, error) + CreateKeyTx(context.Context, *sql.Tx, *IntegrationKey) (*IntegrationKey, error) + FindOne(ctx context.Context, id string) (*IntegrationKey, error) + FindAllByService(ctx context.Context, id string) ([]IntegrationKey, error) + Delete(ctx context.Context, id string) error + DeleteTx(ctx context.Context, tx *sql.Tx, id string) error + DeleteManyTx(ctx context.Context, tx *sql.Tx, ids []string) error +} + +type DB struct { + db *sql.DB + + getServiceID *sql.Stmt + create *sql.Stmt + findOne *sql.Stmt + findAllByService *sql.Stmt + delete *sql.Stmt +} + +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + db: db, + + getServiceID: p.P("SELECT service_id FROM integration_keys WHERE id = $1 AND type = $2"), + create: p.P("INSERT INTO integration_keys (id, name, type, service_id) VALUES ($1, $2, $3, $4)"), + findOne: p.P("SELECT id, name, type, service_id FROM integration_keys WHERE id = $1"), + findAllByService: p.P("SELECT id, name, type, service_id FROM integration_keys WHERE service_id = $1"), + delete: p.P("DELETE FROM integration_keys WHERE id = any($1)"), + }, p.Err +} + +func (db *DB) Authorize(ctx context.Context, id string, t Type) (context.Context, error) { + var serviceID string + var err error + permission.SudoContext(ctx, func(c context.Context) { + serviceID, err = db.GetServiceID(c, id, t) + }) + if err == sql.ErrNoRows { + return ctx, validation.NewFieldError("IntegrationKeyID", "not found") + } + if err != nil { + return ctx, errors.Wrap(err, "lookup serviceID") + } + ctx = permission.ServiceSourceContext(ctx, serviceID, &permission.SourceInfo{ + Type: permission.SourceTypeIntegrationKey, + ID: id, + }) + return ctx, nil +} + +func (db *DB) GetServiceID(ctx context.Context, id string, t Type) (string, error) { + err := validate.Many( + validate.UUID("IntegrationKeyID", id), + validate.OneOf("IntegrationType", t, TypeGrafana, TypeGeneric, TypeEmail), 
+ ) + if err != nil { + return "", err + } + err = permission.LimitCheckAny(ctx, permission.System, permission.Admin, permission.User) + if err != nil { + return "", err + } + + row := db.getServiceID.QueryRowContext(ctx, id, t) + + var serviceID string + err = row.Scan(&serviceID) + if err == sql.ErrNoRows { + return "", err + } + if err != nil { + return "", errors.WithMessage(err, "lookup failure") + } + + return serviceID, nil +} + +func (db *DB) Create(ctx context.Context, i *IntegrationKey) (*IntegrationKey, error) { + return db.CreateKeyTx(ctx, nil, i) +} + +func (db *DB) CreateKeyTx(ctx context.Context, tx *sql.Tx, i *IntegrationKey) (*IntegrationKey, error) { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return nil, err + } + + n, err := i.Normalize() + if err != nil { + return nil, err + } + + stmt := db.create + if tx != nil { + stmt = tx.Stmt(stmt) + } + + n.ID = uuid.NewV4().String() + _, err = stmt.ExecContext(ctx, n.ID, n.Name, n.Type, n.ServiceID) + if err != nil { + return nil, err + } + return n, nil +} + +func (db *DB) Delete(ctx context.Context, id string) error { + return db.DeleteTx(ctx, nil, id) +} +func (db *DB) DeleteTx(ctx context.Context, tx *sql.Tx, id string) error { + return db.DeleteManyTx(ctx, tx, []string{id}) +} +func (db *DB) DeleteManyTx(ctx context.Context, tx *sql.Tx, ids []string) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + err = validate.ManyUUID("IntegrationKeyID", ids, 50) + if err != nil { + return err + } + + s := db.delete + if tx != nil { + s = tx.Stmt(s) + } + _, err = s.ExecContext(ctx, pq.StringArray(ids)) + return err +} + +func (db *DB) FindOne(ctx context.Context, id string) (*IntegrationKey, error) { + err := validate.UUID("IntegrationKeyID", id) + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return nil, 
err + } + + row := db.findOne.QueryRowContext(ctx, id) + var i IntegrationKey + err = scanFrom(&i, row.Scan) + if err != nil { + return nil, err + } + + return &i, nil + +} + +func (db *DB) FindAllByService(ctx context.Context, serviceID string) ([]IntegrationKey, error) { + err := validate.UUID("ServiceID", serviceID) + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return nil, err + } + + rows, err := db.findAllByService.QueryContext(ctx, serviceID) + if err != nil { + return nil, err + } + defer rows.Close() + return scanAllFrom(rows) +} + +func scanFrom(i *IntegrationKey, f func(args ...interface{}) error) error { + return f(&i.ID, &i.Name, &i.Type, &i.ServiceID) +} + +func scanAllFrom(rows *sql.Rows) (integrationKeys []IntegrationKey, err error) { + var i IntegrationKey + for rows.Next() { + err = scanFrom(&i, rows.Scan) + if err != nil { + return nil, err + } + integrationKeys = append(integrationKeys, i) + } + return integrationKeys, nil +} diff --git a/integrationkey/type.go b/integrationkey/type.go new file mode 100644 index 0000000000..55db6d3cdb --- /dev/null +++ b/integrationkey/type.go @@ -0,0 +1,33 @@ +package integrationkey + +import ( + "database/sql/driver" + "fmt" +) + +// Type is the entity that needs an integration. 
+type Type string + +// Types +const ( + TypeGrafana Type = "grafana" + TypeGeneric Type = "generic" + TypeEmail Type = "email" +) + +func (s Type) Value() (driver.Value, error) { + str := string(s) + return str, nil +} + +func (s *Type) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *s = Type(t) + case string: + *s = Type(t) + default: + return fmt.Errorf("could not process unknown type for source %T", t) + } + return nil +} diff --git a/internal/generatemocks.go b/internal/generatemocks.go new file mode 100644 index 0000000000..7573bd11f7 --- /dev/null +++ b/internal/generatemocks.go @@ -0,0 +1,56 @@ +package main + +import ( + "fmt" + "github.com/target/goalert/alert" + "github.com/target/goalert/escalation" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "os" + "os/exec" + "path" + "reflect" + "strings" +) + +func gen(types ...interface{}) { + for _, t := range types { + elem := reflect.TypeOf(t).Elem() + run(elem.PkgPath(), elem.Name()) + } +} +func run(pkg string, iface string) { + parts := strings.Split(pkg, "/") + pName := parts[len(parts)-1] + dir := path.Join("internal", "mocks", "mock_"+pName) + os.MkdirAll(dir, 0755) + + file := path.Join(dir, "mock"+strings.ToLower(iface)+".go") + fd, err := os.Create(file) + if err != nil { + fmt.Println("ERROR:", err.Error()) + os.Exit(1) + } + defer fd.Close() + + cmd := exec.Command("mockgen", pkg, iface) + cmd.Stdout = fd + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + fmt.Println("ERROR:", err.Error()) + os.Exit(1) + } +} + +func main() { + gen( + (*escalation.Store)(nil), + (*escalation.Manager)(nil), + (*rule.Store)(nil), + (*schedule.Store)(nil), + (*rotation.Store)(nil), + (*alert.Store)(nil), + ) +} diff --git a/internal/match/assignmentmatchers.go b/internal/match/assignmentmatchers.go new file mode 100644 index 0000000000..c702533abe --- /dev/null +++ 
b/internal/match/assignmentmatchers.go @@ -0,0 +1,53 @@ +package match + +import ( + "fmt" + "github.com/target/goalert/assignment" + + "github.com/golang/mock/gomock" +) + +func Target(t assignment.Target) gomock.Matcher { + return asnTgtMatcher{Target: t} +} +func TargetValue(t assignment.TargetType, id string) gomock.Matcher { + return Target(assignment.RawTarget{ + ID: id, + Type: t, + }) +} +func Source(s assignment.Source) gomock.Matcher { + return asnSrcMatcher{Source: s} +} +func SourceValue(s assignment.SrcType, id string) gomock.Matcher { + return Source(assignment.RawSource{ + ID: id, + Type: s, + }) +} + +type asnTgtMatcher struct{ assignment.Target } + +func (m asnTgtMatcher) Matches(x interface{}) bool { + t, ok := x.(assignment.Target) + if !ok { + return false + } + return t.TargetType() == m.TargetType() && t.TargetID() == m.TargetID() +} +func (m asnTgtMatcher) String() string { + return fmt.Sprintf("%s(%s)", m.TargetType().String(), m.TargetID()) +} + +type asnSrcMatcher struct{ assignment.Source } + +func (m asnSrcMatcher) Matches(x interface{}) bool { + s, ok := x.(assignment.Source) + if !ok { + return false + } + return s.SourceType() == m.SourceType() && s.SourceID() == m.SourceID() +} +func (m asnSrcMatcher) String() string { + return fmt.Sprintf("%s(%s)", m.SourceType().String(), m.SourceID()) +} diff --git a/keyring/keys.go b/keyring/keys.go new file mode 100644 index 0000000000..4fa6faa0f0 --- /dev/null +++ b/keyring/keys.go @@ -0,0 +1,43 @@ +package keyring + +import ( + "crypto/rand" + "crypto/x509" + "encoding/pem" + + "github.com/pkg/errors" +) + +// Keys represents a set of encryption/decryption keys. +type Keys [][]byte + +// Encrypt will encrypt and then encode data into PEM-format. 
+func (k Keys) Encrypt(label string, data []byte) ([]byte, error) { + if len(k) == 0 { + k = Keys{[]byte{}} + } + block, err := x509.EncryptPEMBlock(rand.Reader, label, data, k[0], x509.PEMCipherAES256) + if err != nil { + return nil, err + } + data = pem.EncodeToMemory(block) + return data, err +} + +// Decrypt will decrypt PEM-encoded data using the first successful key. The index of +// the used key is returned as n. +func (k Keys) Decrypt(pemData []byte) (data []byte, n int, err error) { + if len(k) == 0 { + k = Keys{[]byte{}} + } + block, _ := pem.Decode(pemData) + + for i, key := range k { + data, err = x509.DecryptPEMBlock(block, key) + if err == nil { + return data, i, nil + } + } + + return nil, -1, errors.New("invalid decryption key") +} diff --git a/keyring/store.go b/keyring/store.go new file mode 100644 index 0000000000..cab5206b1a --- /dev/null +++ b/keyring/store.go @@ -0,0 +1,596 @@ +package keyring + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha512" + "crypto/x509" + "database/sql" + "encoding/binary" + "encoding/json" + "github.com/target/goalert/util" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation/validate" + "math/big" + "sync" + "time" + + jwt "github.com/dgrijalva/jwt-go" + "github.com/pkg/errors" +) + +func init() { + jwt.RegisterSigningMethod("ES224", func() jwt.SigningMethod { + return &jwt.SigningMethodECDSA{ + Name: "ES224", + Hash: crypto.SHA512_224, + KeySize: 28, + CurveBits: 224, + } + }) +} + +// A Keyring allows signing and verifying messages. 
+type Keyring interface { + RotateKeys(ctx context.Context) error + + Sign(p []byte) ([]byte, error) + Verify(p []byte, signature []byte) (valid, oldKey bool) + + SignJWT(jwt.Claims) (string, error) + VerifyJWT(string, jwt.Claims) (bool, error) + + Shutdown(context.Context) error +} + +var _ Keyring = &DB{} + +type header struct { + Version byte + KeyIndex byte +} + +type v1Signature struct { + RLen, SLen byte + R [28]byte + S [28]byte +} + +// Config allows specifying operational parameters of a keyring. +type Config struct { + // Name is the unique identifier of this keyring. + Name string + + // RotationDays is the number of days between automatic rotations. If zero, automatic rotation is disabled. + RotationDays int + + // MaxOldKeys determines how many old keys (1-254) are kept for validation. This value, multiplied by RotationDays + // determines the minimum amount of time a signature remains valid. + MaxOldKeys int + + // Keys specifies a set of keys to use for encrypting and decrypting the private key. + Keys Keys +} + +// DB implements a Keyring using postgres as the datastore. 
+type DB struct { + db *sql.DB + + cfg Config + + verificationKeys map[byte]ecdsa.PublicKey + signingKey *ecdsa.PrivateKey + rotationCount int + + mx sync.RWMutex + shutdown chan context.Context + forceRotate chan chan error + + fetchKeys *sql.Stmt + setKeys *sql.Stmt + txTime *sql.Stmt + insertKeys *sql.Stmt + + parser *jwt.Parser +} + +func marshalVerificationKeys(keys map[byte]ecdsa.PublicKey) ([]byte, error) { + m := make(map[byte][]byte, len(keys)) + var err error + for id, key := range keys { + m[id], err = x509.MarshalPKIXPublicKey(&key) + if err != nil { + return nil, err + } + } + return json.Marshal(m) +} +func parseVerificationKeys(data []byte) (map[byte]ecdsa.PublicKey, error) { + var m map[byte][]byte + err := json.Unmarshal(data, &m) + if err != nil { + return nil, err + } + + res := make(map[byte]ecdsa.PublicKey, len(m)) + for id, data := range m { + key, err := x509.ParsePKIXPublicKey(data) + if err != nil { + // ignore broken keys for verification + continue + } + if k, ok := key.(*ecdsa.PublicKey); ok { + res[id] = *k + } + } + + return res, nil +} + +// NewDB creates a new postgres-backed keyring. 
+func NewDB(ctx context.Context, db *sql.DB, cfg *Config) (*DB, error) { + if cfg == nil { + cfg = &Config{Name: "default"} + } + if cfg.MaxOldKeys == 0 { + cfg.MaxOldKeys = 1 + } + err := validate.Many( + validate.IDName("Name", cfg.Name), + + // keyspace is 256 (1 byte); need 1 for current key, and 1 for next key leaving 254 possible slots for old ones + validate.Range("MaxOldKeys", cfg.MaxOldKeys, 1, 254), + + validate.Range("RotationDays", cfg.RotationDays, 0, 9000), + ) + if err != nil { + return nil, err + } + p := &util.Prepare{DB: db, Ctx: ctx} + d := &DB{ + db: db, + cfg: *cfg, + + forceRotate: make(chan chan error), + shutdown: make(chan context.Context), + + parser: &jwt.Parser{ValidMethods: []string{"ES224"}}, + + txTime: p.P(`select now()`), + insertKeys: p.P(` + insert into keyring ( + id, + verification_keys, + signing_key, + next_key, + next_rotation, + rotation_count + ) values ( + $1, $2, $3, $4, $5, 0 + ) + on conflict do nothing + `), + fetchKeys: p.P(` + select + verification_keys, + signing_key, + next_key, + now(), + next_rotation, + rotation_count + from keyring + where id = $1 + for update + `), + setKeys: p.P(` + update keyring + set + verification_keys = $2, + signing_key = $3, + next_key = $4, + next_rotation = $5, + rotation_count = $6 + where id = $1 + `), + } + + if p.Err != nil { + return nil, p.Err + } + + err = d.refreshAndRotateKeys(ctx, false) + if err != nil { + return nil, err + } + + go d.loop() + return d, nil +} + +// Shutdown allows gracefully shutting down the keyring (e.g. auto rotations) after +// finishing any in-progress rotations. 
+func (db *DB) Shutdown(ctx context.Context) error { + if db == nil { + return nil + } + db.shutdown <- ctx + + // wait for it to complete + <-db.shutdown + return nil +} +func (db *DB) loop() { + t := time.NewTicker(12 * time.Hour) + var shutdownCtx context.Context + defer close(db.shutdown) +mainLoop: + for { + select { + case <-t.C: + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + err := db.refreshAndRotateKeys(ctx, false) + cancel() + if err != nil { + log.Log(ctx, err) + } + case shutdownCtx = <-db.shutdown: + break mainLoop + case ch := <-db.forceRotate: + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + ch <- db.refreshAndRotateKeys(ctx, true) + cancel() + } + } + + // respond to any pending force rotation calls + close(db.forceRotate) + for ch := range db.forceRotate { + ctx, cancel := context.WithTimeout(shutdownCtx, time.Minute) + ch <- db.refreshAndRotateKeys(ctx, true) + cancel() + } +} + +func (db *DB) newKey() (*ecdsa.PrivateKey, []byte, error) { + key, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + if err != nil { + return nil, nil, err + } + data, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, err + } + data, err = db.cfg.Keys.Encrypt("ECDSA PRIVATE KEY", data) + if err != nil { + return nil, nil, err + } + return key, data, nil +} +func (db *DB) loadKey(encData []byte) (*ecdsa.PrivateKey, error) { + data, _, err := db.cfg.Keys.Decrypt(encData) + if err != nil { + return nil, err + } + + key, err := x509.ParseECPrivateKey(data) + if err != nil { + return nil, err + } + + return key, nil +} + +func (db *DB) commitNewKeyring(ctx context.Context, tx *sql.Tx) error { + var t time.Time + err := tx.Stmt(db.txTime).QueryRowContext(ctx).Scan(&t) + if err != nil { + return err + } + signKey, signData, err := db.newKey() + if err != nil { + return err + } + nextKey, nextData, err := db.newKey() + if err != nil { + return err + } + + v := map[byte]ecdsa.PublicKey{ + 0: 
signKey.PublicKey, + 1: nextKey.PublicKey, + } + + vData, err := marshalVerificationKeys(v) + if err != nil { + return err + } + + var nextRotTime interface{} + if db.cfg.RotationDays > 0 { + // We want to wait an explicit amount of time, rather than rotating by date. + // + // Specifically, if multiple instances of GoAlert happen to run on systems of differing + // timezones, they should be able to agree on handoff times. + nextRotTime = t.Add(time.Hour * 24 * time.Duration(db.cfg.RotationDays)) + } + + res, err := tx.Stmt(db.insertKeys).ExecContext(ctx, db.cfg.Name, vData, signData, nextData, nextRotTime) + if err != nil { + return err + } + rowCount, err := res.RowsAffected() + if err != nil { + return err + } + err = tx.Commit() + if err != nil { + return err + } + + var rotationCount int + + if rowCount == 0 { + // failed to insert the new data, so scan old & refresh + var vKeysData, signKeyData, nextKeyData []byte + var rotateT time.Time + err = db.fetchKeys.QueryRowContext(ctx, db.cfg.Name).Scan(&vKeysData, &signKeyData, &nextKeyData, &t, &rotateT, &rotationCount) + if err != nil { + return err + } + + v, err = parseVerificationKeys(vKeysData) + if err != nil { + return err + } + + signKey, err = db.loadKey(signKeyData) + if err != nil { + // if we can't get the sign key -- we will at least move forward with the verification keys + log.Log(ctx, errors.Wrap(err, "load signing key")) + } + } + + db.mx.Lock() + defer db.mx.Unlock() + + db.verificationKeys = v + db.signingKey = signKey + db.rotationCount = rotationCount + + return nil +} + +func (db *DB) rotateVerificationKeys(m map[byte]ecdsa.PublicKey, n int, newKey ecdsa.PublicKey) map[byte]ecdsa.PublicKey { + newM := make(map[byte]ecdsa.PublicKey, len(m)+1) + for i := n - db.cfg.MaxOldKeys; i <= n; i++ { + if key, ok := m[byte(i)]; ok { + newM[byte(i)] = key + } + } + newM[byte(n+1)] = newKey + return newM +} + +// RotateKeys will force a key rotation. 
+func (db *DB) RotateKeys(ctx context.Context) error { + ch := make(chan error) + db.forceRotate <- ch + return <-ch +} + +// refreshAndRotateKeys will perform a key rotation, and cleanup expired keys when appropriate. If forceRotation +// is true, a rotation will always happen -- even if RotationDays is zero (disabled). It also +// ensures the current key configuration is up-to-date. +// +// When a key is rotated, a new key is generated and inserted. +func (db *DB) refreshAndRotateKeys(ctx context.Context, forceRotation bool) error { + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + row := tx.Stmt(db.fetchKeys).QueryRowContext(ctx, db.cfg.Name) + + var verificationKeys map[byte]ecdsa.PublicKey + + var vKeysData, signKeyData, nextKeyData []byte + var t time.Time + var rotateT *time.Time + var count int + err = row.Scan(&vKeysData, &signKeyData, &nextKeyData, &t, &rotateT, &count) + if err == sql.ErrNoRows { + return db.commitNewKeyring(ctx, tx) + } + if err != nil { + return err + } + + verificationKeys, err = parseVerificationKeys(vKeysData) + if err != nil { + return errors.Wrap(err, "unmarshal verification keys") + } + + if forceRotation || (rotateT != nil && !t.Before(*rotateT)) { + // perform a key rotation + signKeyData = nextKeyData + var nextKey *ecdsa.PrivateKey + nextKey, nextKeyData, err = db.newKey() + if err != nil { + return err + } + count++ + verificationKeys = db.rotateVerificationKeys(verificationKeys, count, nextKey.PublicKey) + vKeysData, err = marshalVerificationKeys(verificationKeys) + if err != nil { + return err + } + var nextRotTime interface{} + if db.cfg.RotationDays > 0 { + // We want to wait an explicit amount of time, rather than rotating by date. + // + // Specifically, if multiple instances of GoAlert happen to run on systems of differing + // timezones, they should be able to agree on handoff times. 
+ nextRotTime = t.Add(time.Hour * 24 * time.Duration(db.cfg.RotationDays)) + } + _, err := tx.Stmt(db.setKeys).ExecContext(ctx, db.cfg.Name, vKeysData, signKeyData, nextKeyData, nextRotTime, count) + if err != nil { + return err + } + err = tx.Commit() + if err != nil { + return err + } + } + + signKey, err := db.loadKey(signKeyData) + if err != nil { + log.Log(ctx, errors.Wrap(err, "load signing key")) + } + + db.mx.Lock() + defer db.mx.Unlock() + + db.verificationKeys = verificationKeys + db.signingKey = signKey + db.rotationCount = count + + return nil +} + +func (db *DB) SignJWT(c jwt.Claims) (string, error) { + db.mx.RLock() + defer db.mx.RUnlock() + + if db.signingKey == nil { + return "", errors.New("signing key unavailable") + } + + tok := jwt.NewWithClaims(jwt.GetSigningMethod("ES224"), c) + tok.Header["key"] = byte(db.rotationCount % 256) + + return tok.SignedString(db.signingKey) +} + +// Sign will sign a message and return the signature. +func (db *DB) Sign(p []byte) ([]byte, error) { + db.mx.RLock() + defer db.mx.RUnlock() + + if db.signingKey == nil { + return nil, errors.New("signing key unavailable") + } + + hdr := header{ + Version: 1, // v1 is latest + KeyIndex: byte(db.rotationCount % 256), + } + + sum := sha512.Sum512_224(p) + r, s, err := ecdsa.Sign(rand.Reader, db.signingKey, sum[:]) + if err != nil { + return nil, err + } + var v1sig v1Signature + v1sig.RLen = byte(len(r.Bytes())) + v1sig.SLen = byte(len(s.Bytes())) + copy(v1sig.R[:], r.Bytes()) + copy(v1sig.S[:], s.Bytes()) + + buf := new(bytes.Buffer) + err = binary.Write(buf, binary.BigEndian, hdr) + if err != nil { + return nil, err + } + err = binary.Write(buf, binary.BigEndian, v1sig) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (db *DB) VerifyJWT(s string, c jwt.Claims) (bool, error) { + db.mx.RLock() + defer db.mx.RUnlock() + + var currentKey bool + _, err := db.parser.ParseWithClaims(s, c, func(tok *jwt.Token) (interface{}, error) { + keyIndex, ok := 
tok.Header["key"].(float64) + if !ok { + return nil, errors.New("invalid key index") + } + key, ok := db.verificationKeys[byte(keyIndex)] + if !ok { + return nil, errors.New("invalid key") + } + + currentKey = byte(keyIndex) == byte(db.rotationCount) || byte(keyIndex) == byte(db.rotationCount+1) + return &key, nil + }) + if err != nil { + return false, err + } + + return currentKey, nil +} + +// Verify will validate the signature and metadata, and optionally length, of a message. +func (db *DB) Verify(p []byte, signature []byte) (valid, oldKey bool) { + db.mx.RLock() + defer db.mx.RUnlock() + + buf := bytes.NewBuffer(signature) + var hdr header + err := binary.Read(buf, binary.BigEndian, &hdr) + // The only error here for the bytes.Buffer is if it's too short + // which just means it's an invalid message. + if err != nil { + return false, false + } + + // only v1 is supported currently + if hdr.Version != 1 { + return false, false + } + + var v1sig v1Signature + err = binary.Read(buf, binary.BigEndian, &v1sig) + if err != nil { + return false, false + } + + // signature should not include any trailing/extra data + if buf.Len() != 0 { + return false, false + } + + if v1sig.RLen > 28 || v1sig.SLen > 28 { + return false, false + } + + key, ok := db.verificationKeys[hdr.KeyIndex] + if !ok { + return false, false + } + // ensure key exists + r := big.NewInt(0) + s := big.NewInt(0) + r.SetBytes(v1sig.R[:v1sig.RLen]) + s.SetBytes(v1sig.S[:v1sig.SLen]) + + sum := sha512.Sum512_224(p) + valid = ecdsa.Verify(&key, sum[:], r, s) + if !valid { + return false, false + } + + output := make([]byte, buf.Len()) + copy(output, buf.Bytes()) + oldKey = hdr.KeyIndex != byte(db.rotationCount) && hdr.KeyIndex != byte(db.rotationCount+1) + return true, oldKey +} diff --git a/keyring/store_test.go b/keyring/store_test.go new file mode 100644 index 0000000000..d899fb4ea4 --- /dev/null +++ b/keyring/store_test.go @@ -0,0 +1,51 @@ +package keyring + +import ( + "bytes" + "crypto/ecdsa" + 
"crypto/elliptic" + "crypto/rand" + "testing" + + uuid "github.com/satori/go.uuid" +) + +func TestSignVerify(t *testing.T) { + signKey, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + if err != nil { + t.Fatal(err) + } + + v := map[byte]ecdsa.PublicKey{ + 0: signKey.PublicKey, + } + + db := &DB{ + verificationKeys: v, + signingKey: signKey, + } + var buf bytes.Buffer + try := func(t *testing.T) { + sessID := uuid.NewV4() + buf.WriteByte('S') // session IDs will be prefixed with an "S" + buf.Write(sessID.Bytes()) + sig, err := db.Sign(buf.Bytes()) + if err != nil { + t.Fatal(err) + } + + valid, old := db.Verify(buf.Bytes(), sig) + if !valid { + t.Fatal("validation failed") + } + if old { + t.Fatal("old key used") + } + buf.Reset() + } + + for i := 0; i < 100; i++ { + // running multiple because not all signatures are the same size (encoded) + t.Run("", try) + } +} diff --git a/label/label.go b/label/label.go new file mode 100644 index 0000000000..828e6cdb7d --- /dev/null +++ b/label/label.go @@ -0,0 +1,23 @@ +package label + +import ( + "github.com/target/goalert/assignment" + "github.com/target/goalert/validation/validate" +) + +// A Label is a key-value pair assigned to a target. +type Label struct { + Target assignment.Target + Key string `json:"key"` + Value string `json:"value"` +} + +// Normalize will validate and normalize the label, returning a copy. 
+func (l Label) Normalize() (*Label, error) { + return &l, validate.Many( + validate.OneOf("TargetType", l.Target.TargetType(), assignment.TargetTypeService), + validate.UUID("TargetID", l.Target.TargetID()), + validate.LabelKey("Key", l.Key), + validate.LabelValue("Value", l.Value), + ) +} diff --git a/label/search.go b/label/search.go new file mode 100644 index 0000000000..2c8bbe8c1c --- /dev/null +++ b/label/search.go @@ -0,0 +1,189 @@ +package label + +import ( + "context" + "database/sql" + "github.com/target/goalert/assignment" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/validation/validate" + "strconv" + "strings" + "text/template" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// SearchOptions allow filtering and paginating the list of rotations. +type SearchOptions struct { + Search string `json:"s,omitempty"` + After SearchCursor `json:"a,omitempty"` + + // Omit specifies a list of key names to exclude from the results. + Omit []string `json:"o,omitempty"` + + Limit int `json:"-"` + + UniqueKeys bool `json:"u,omitempty"` +} + +// SearchCursor is used to indicate a position in a paginated list. 
+type SearchCursor struct { + Key string `json:"k,omitempty"` + TargetID string `json:"t,omitempty"` + TargetType assignment.TargetType `json:"y,omitempty"` +} + +var searchTemplate = template.Must(template.New("search").Parse(` + SELECT{{if .UniqueKeys}} distinct on (lower(key)){{end}} + key, value, tgt_service_id + FROM labels l + WHERE true + {{if .Omit}} + AND not key = any(:omit) + {{end}} + {{if .KeySearch}} + AND (l.key ILIKE :keySearch) + {{end}} + {{if .ValueSearch}} + AND ({{if .ValueNegate}}NOT {{end}}l.value ILIKE :valueSearch) + {{end}} + {{if .After.Key}} + AND (lower(l.key) > lower(:afterKey) AND l.tgt_service_id > :afterServiceID) + {{end}} + ORDER BY lower(key), tgt_service_id + LIMIT {{.Limit}} +`)) + +type renderData SearchOptions + +func (opts renderData) ValueNegate() bool { + idx := strings.IndexRune(opts.Search, '=') + return idx > 0 && opts.Search[idx-1] == '!' +} +func (opts renderData) KeySearch() string { + if opts.Search == "" { + return "" + } + + idx := strings.IndexRune(opts.Search, '=') + if idx != -1 { + s := search.Escape(strings.TrimSuffix(opts.Search[:idx], "!")) + if s == "*" { + return "" + } + // Equal sign denotes exact match, however + // up to 2 wildcards are supported via '*'. 
+ return strings.Replace(s, "*", "%", 2) + } + + return "%" + search.Escape(opts.Search) + "%" +} +func (opts renderData) ValueSearch() string { + if opts.Search == "" { + return "" + } + + idx := strings.IndexRune(opts.Search, '=') + if idx == -1 { + return "" + } + s := search.Escape(opts.Search[idx+1:]) + if s == "*" { + return "" + } + + return strings.Replace(s, "*", "%", 2) +} + +func (opts renderData) Normalize() (*renderData, error) { + if opts.Limit == 0 { + opts.Limit = search.DefaultMaxResults + } + + err := validate.Many( + validate.Text("Search", opts.Search, 0, search.MaxQueryLen), + validate.Range("Limit", opts.Limit, 0, search.MaxResults), + validate.Range("Omit", len(opts.Omit), 0, 50), + ) + + if opts.After.Key != "" { + err = validate.Many(err, validate.LabelKey("After.Key", opts.After.Key)) + } + + if err != nil { + return nil, err + } + for i, key := range opts.Omit { + err = validate.Many(err, + validate.LabelKey("Omit["+strconv.Itoa(i)+"]", key), + ) + } + + return &opts, err +} + +func (opts renderData) QueryArgs() []sql.NamedArg { + var afterServiceID string + if opts.After.TargetType == assignment.TargetTypeService { + afterServiceID = opts.After.TargetID + } + return []sql.NamedArg{ + sql.Named("keySearch", opts.KeySearch()), + sql.Named("valueSearch", opts.ValueSearch()), + sql.Named("afterKey", opts.After.Key), + sql.Named("afterServiceID", afterServiceID), + sql.Named("omit", pq.StringArray(opts.Omit)), + } +} + +func (db *DB) Search(ctx context.Context, opts *SearchOptions) ([]Label, error) { + err := permission.LimitCheckAny(ctx, permission.User) + if err != nil { + return nil, err + } + if opts == nil { + opts = &SearchOptions{} + } + data, err := (*renderData)(opts).Normalize() + if err != nil { + return nil, err + } + query, args, err := search.RenderQuery(ctx, searchTemplate, data) + if err != nil { + return nil, errors.Wrap(err, "render query") + } + + rows, err := db.db.QueryContext(ctx, query, args...) 
+ if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Label + var l Label + var svcID sql.NullString + for rows.Next() { + err = rows.Scan( + &l.Key, + &l.Value, + &svcID, + ) + if err != nil { + return nil, errors.Wrap(err, "scan row") + } + + switch { + case svcID.Valid: + l.Target = assignment.ServiceTarget(svcID.String) + } + + result = append(result, l) + } + + return result, nil +} diff --git a/label/store.go b/label/store.go new file mode 100644 index 0000000000..8503d11db1 --- /dev/null +++ b/label/store.go @@ -0,0 +1,170 @@ +package label + +import ( + "context" + "database/sql" + "github.com/target/goalert/assignment" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + + "github.com/pkg/errors" +) + +// Store allows the lookup and management of Labels. +type Store interface { + SetTx(ctx context.Context, tx *sql.Tx, label *Label) error + FindAllByService(ctx context.Context, serviceID string) ([]Label, error) + UniqueKeysTx(ctx context.Context, tx *sql.Tx) ([]string, error) + UniqueKeys(ctx context.Context) ([]string, error) + Search(ctx context.Context, opts *SearchOptions) ([]Label, error) +} + +// DB implements the Store interface using a postgres database. +type DB struct { + db *sql.DB + + upsert *sql.Stmt + delete *sql.Stmt + findAllByService *sql.Stmt + uniqueKeys *sql.Stmt +} + +// NewDB will Set a DB backend from a sql.DB. An error will be returned if statements fail to prepare. 
+func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + return &DB{ + db: db, + upsert: p.P(` + INSERT INTO labels (tgt_service_id, key, value) + VALUES ($1, $2, $3) + ON CONFLICT (key, tgt_service_id) DO UPDATE + SET value = $3 + `), + delete: p.P(` + DELETE FROM labels + WHERE tgt_service_id = $1 + AND key = $2 + `), + findAllByService: p.P(` + SELECT key, value + FROM labels + WHERE tgt_service_id = $1 + ORDER BY key ASC + `), + uniqueKeys: p.P(` + SELECT DISTINCT (key) + FROM labels + ORDER BY key ASC + `), + }, p.Err +} + +// SetTx will set a label for the service. It can be used to set the key-value pair for the label, +// delete a label or update the value given the label's key. +func (db *DB) SetTx(ctx context.Context, tx *sql.Tx, label *Label) error { + err := permission.LimitCheckAny(ctx, permission.System, permission.User) + if err != nil { + return err + } + + n, err := label.Normalize() + if err != nil { + return err + } + + if n.Value == "" { + // Delete Operation + stmt := db.delete + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + _, err = stmt.ExecContext(ctx, n.Target.TargetID(), n.Key) + return errors.Wrap(err, "delete label") + } + + stmt := db.upsert + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + _, err = stmt.ExecContext(ctx, n.Target.TargetID(), n.Key, n.Value) + if err != nil { + return errors.Wrap(err, "set label") + } + + return nil +} + +// FindAllByService finds all labels for a particular service. It returns all key-value pairs. 
+func (db *DB) FindAllByService(ctx context.Context, serviceID string) ([]Label, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.User) + if err != nil { + return nil, err + } + + err = validate.UUID("ServiceID", serviceID) + if err != nil { + return nil, err + } + rows, err := db.findAllByService.QueryContext(ctx, serviceID) + if err != nil { + return nil, err + } + defer rows.Close() + + var labels []Label + var l Label + + for rows.Next() { + err = rows.Scan( + &l.Key, + &l.Value, + ) + if err != nil { + return nil, errors.Wrap(err, "scan row") + } + + l.Target = assignment.ServiceTarget(serviceID) + + labels = append(labels, l) + } + + return labels, nil +} + +func (db *DB) UniqueKeysTx(ctx context.Context, tx *sql.Tx) ([]string, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.User) + if err != nil { + return nil, err + } + + stmt := db.uniqueKeys + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + rows, err := stmt.QueryContext(ctx) + if err != nil { + return nil, err + } + defer rows.Close() + + var keys []string + + for rows.Next() { + var k string + err = rows.Scan(&k) + if err != nil { + return nil, errors.Wrap(err, "scan row") + } + + keys = append(keys, k) + } + return keys, nil +} + +func (db *DB) UniqueKeys(ctx context.Context) ([]string, error) { + return db.UniqueKeysTx(ctx, nil) +} diff --git a/limit/error.go b/limit/error.go new file mode 100644 index 0000000000..c8dbd90504 --- /dev/null +++ b/limit/error.go @@ -0,0 +1,90 @@ +package limit + +import ( + "strconv" + "strings" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// Error represents an error caused by +type Error interface { + error + Max() int + ID() ID + Limit() bool +} + +type limitErr struct { + id ID + max int +} + +var _ Error = &limitErr{} + +// IsLimitError will determine if an error's cause is a limit.Error. 
+func IsLimitError(err error) bool { + if e, ok := errors.Cause(err).(Error); ok && e.Limit() { + return true + } + return false +} + +// MapError will map a Postgres error that is caused by a limit constraint. +// If the given error is not caused by a known system limit constraint, nil is returned. +func MapError(err error) Error { + e, ok := err.(*pq.Error) + if !ok { + return nil + } + if !strings.HasPrefix(e.Hint, "max=") { + return nil + } + if !strings.HasSuffix(e.Constraint, "_limit") { + return nil + } + id := ID(strings.TrimSuffix(e.Constraint, "_limit")) + if id.Valid() != nil { + return nil + } + m, err := strconv.Atoi(strings.TrimPrefix(e.Hint, "max=")) + if err != nil { + return nil + } + return &limitErr{id: id, max: m} +} + +func (l *limitErr) ClientError() bool { return true } + +func (l *limitErr) Limit() bool { return true } +func (l *limitErr) ID() ID { return l.id } +func (l *limitErr) Max() int { return l.max } +func (l *limitErr) Error() string { + switch l.id { + case NotificationRulesPerUser: + return "too many notification rules" + case ContactMethodsPerUser: + return "too many contact methods" + case EPStepsPerPolicy: + return "too many steps on this policy" + case EPActionsPerStep: + return "too many actions on this step" + case ParticipantsPerRotation: + return "too many participants on this rotation" + case RulesPerSchedule: + return "too many rules on this schedule" + case IntegrationKeysPerService: + return "too many integration keys on this service" + case UnackedAlertsPerService: + return "too many unacknowledged alerts for this service" + case TargetsPerSchedule: + return "too many targets on this schedule" + case HeartbeatMonitorsPerService: + return "too many heartbeat monitors on this service" + case UserOverridesPerSchedule: + return "too many user overrides on this schedule" + } + + return "exceeded limit" +} diff --git a/limit/id.go b/limit/id.go new file mode 100644 index 0000000000..0ba3dc05ca --- /dev/null +++ 
b/limit/id.go
@@ -0,0 +1,38 @@
package limit

import "github.com/target/goalert/validation/validate"

// ID represents the identifier for a given system limit.
// Values match rows of the config_limits table (see store.go).
type ID string

// IDs of configurable limits.
const (
	NotificationRulesPerUser    ID = "notification_rules_per_user"
	ContactMethodsPerUser       ID = "contact_methods_per_user"
	EPStepsPerPolicy            ID = "ep_steps_per_policy"
	EPActionsPerStep            ID = "ep_actions_per_step"
	ParticipantsPerRotation     ID = "participants_per_rotation"
	RulesPerSchedule            ID = "rules_per_schedule"
	IntegrationKeysPerService   ID = "integration_keys_per_service"
	UnackedAlertsPerService     ID = "unacked_alerts_per_service"
	TargetsPerSchedule          ID = "targets_per_schedule"
	HeartbeatMonitorsPerService ID = "heartbeat_monitors_per_service"
	UserOverridesPerSchedule    ID = "user_overrides_per_schedule"
)

// Valid returns nil if a given ID is valid, a validation error is returned otherwise.
// Keep this list in sync with the constants above.
func (id ID) Valid() error {
	return validate.OneOf("LimitID", id,
		NotificationRulesPerUser,
		ContactMethodsPerUser,
		EPStepsPerPolicy,
		EPActionsPerStep,
		ParticipantsPerRotation,
		RulesPerSchedule,
		IntegrationKeysPerService,
		UnackedAlertsPerService,
		TargetsPerSchedule,
		HeartbeatMonitorsPerService,
		UserOverridesPerSchedule,
	)
}
diff --git a/limit/limits.go b/limit/limits.go
new file mode 100644
index 0000000000..7b051e4a62
--- /dev/null
+++ b/limit/limits.go
@@ -0,0 +1,13 @@
package limit

// Limits contains the current value of all configurable limits.
type Limits map[ID]int

// Max returns the current max value of the limit with the given ID.
+func (l Limits) Max(id ID) int { + v, ok := l[id] + if !ok { + return -1 + } + return v +} diff --git a/limit/store.go b/limit/store.go new file mode 100644 index 0000000000..86cabd1bc3 --- /dev/null +++ b/limit/store.go @@ -0,0 +1,118 @@ +package limit + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" +) + +// A Store allows getting and setting system limits. +type Store interface { + // ResetAll will reset all configurable limits to the default (no-limit). + ResetAll(context.Context) error + + // Max will return the current max value for the given limit. + Max(context.Context, ID) (int, error) + + // SetMax allows setting the max value for a limit. + SetMax(context.Context, ID, int) error + + // All will get the current value of all limits. + All(context.Context) (Limits, error) +} + +// DB implements the Store interface against a Postgres DB. +type DB struct { + findAll *sql.Stmt + findOne *sql.Stmt + setOne *sql.Stmt + resetAll *sql.Stmt +} + +// NewDB creates a new DB and prepares all necessary SQL statements. +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + return &DB{ + findAll: p.P(`select id, max from config_limits`), + findOne: p.P(`select max from config_limits where id = $1`), + setOne: p.P(` + insert into config_limits (id, max) + values ($1, $2) + on conflict (id) do update + set max = $2 + `), + resetAll: p.P(`truncate config_limits`), + }, p.Err +} + +// ResetAll implements the Store interface. +func (db *DB) ResetAll(ctx context.Context) error { + err := permission.LimitCheckAny(ctx, permission.Admin) + if err != nil { + return err + } + _, err = db.resetAll.ExecContext(ctx) + return err +} + +// Max implements the Store interface. 
+func (db *DB) Max(ctx context.Context, id ID) (int, error) { + err := permission.LimitCheckAny(ctx, permission.Admin) + if err != nil { + return 0, err + } + err = id.Valid() + if err != nil { + return 0, err + } + var max int + err = db.findOne.QueryRowContext(ctx, id).Scan(&max) + if err == sql.ErrNoRows { + return -1, nil + } + if err != nil { + return 0, err + } + return max, nil +} + +// All implements the Store interface. +func (db *DB) All(ctx context.Context) (Limits, error) { + err := permission.LimitCheckAny(ctx, permission.Admin) + if err != nil { + return nil, err + } + rows, err := db.findAll.QueryContext(ctx) + if err != nil { + return nil, err + } + defer rows.Close() + var id string + var max int + l := make(Limits, 8) + for rows.Next() { + err = rows.Scan(&id, max) + if err != nil { + return nil, err + } + l[ID(id)] = max + } + return l, nil +} + +// SetMax implements the Store interface. +func (db *DB) SetMax(ctx context.Context, id ID, max int) error { + err := permission.LimitCheckAny(ctx, permission.Admin) + if err != nil { + return err + } + err = validate.Many(id.Valid(), validate.Range("Max", max, -1, 9000)) + if err != nil { + return err + } + + _, err = db.setOne.ExecContext(ctx, id, max) + return err +} diff --git a/lock/global.go b/lock/global.go new file mode 100644 index 0000000000..5808a88738 --- /dev/null +++ b/lock/global.go @@ -0,0 +1,11 @@ +package lock + +// Defined global lock values. 
const (
	// Values are arbitrary but must remain unique and stable; decimal
	// equivalents are noted for convenience.
	// NOTE(review): presumably used as Postgres advisory-lock keys —
	// confirm against callers.
	GlobalMigrate            = 0x1337 // 4919
	GlobalEngineProcessing   = 0x1234 // 4660
	GlobalMessageSending     = 0x1330 // 4912
	RegionalEngineProcessing = 0x1342 // 4930
	ModularEngineProcessing  = 0x1347 // 4935
	GlobalSwitchOver         = 0x1111 // 4369
)
diff --git a/logging/errors.slide b/logging/errors.slide
new file mode 100644
index 0000000000..ce66f2f027
--- /dev/null
+++ b/logging/errors.slide
@@ -0,0 +1,44 @@
Simplifying Logging

There's Gotta Be A Better Way

* Using a Single Value

Currently we pass an error, and a message in a lot of places.

	if httpError(w, err, "was doing something") {
		return
	}
	log.WithError(err).Errorln("was doing something")

Instead, we can just use `errors.Wrap`, which returns nil if err is nil.

	if httpError(w, errors.Wrap(err, "do something")) {
		return
	}

* Adding Context

Having context when logging allows us to do things like:
- Enable debugging per-request, or per-subsystem
- Log a RequestID so we see all errors associated with a request

	if httpError(ctx, w, errors.Wrap(err, "do something")) {
		return
	}

* A Simple Interface

The `httpError` example earlier, would call some type of logger.
Let's define what that could look like.
+ +We care about 2 things: +- Logging things we always need to know (errors, requests) +- Logging things when debugging + +So the simplest interface could be: + + Log(ctx, err) + Debug(ctx, err) + +* \ No newline at end of file diff --git a/mailgun/mailgun.go b/mailgun/mailgun.go new file mode 100644 index 0000000000..28211cacd3 --- /dev/null +++ b/mailgun/mailgun.go @@ -0,0 +1,189 @@ +package mailgun + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "github.com/target/goalert/alert" + "github.com/target/goalert/auth" + "github.com/target/goalert/config" + "github.com/target/goalert/integrationkey" + "github.com/target/goalert/limit" + "github.com/target/goalert/permission" + "github.com/target/goalert/retry" + "github.com/target/goalert/util/errutil" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "io" + "net/http" + "net/mail" + "strings" + "time" + + "github.com/pkg/errors" +) + +// httpError is used to respond in a standard way to Mailgun when err != nil. If +// err is nil, false is returned, true otherwise. +// If Mailgun receives a 200 (Success) code it will determine the webhook POST is successful and not retry. +// If Mailgun receives a 406 (Not Acceptable) code, Mailgun will determine the POST is rejected and not retry. +// +// For any other code, Mailgun will retry POSTing according to the following schedule (other than the delivery notification): +// 10 minutes, 10 minutes, 15 minutes, 30 minutes, 1 hour, 2 hour and 4 hours. 
+func httpError(ctx context.Context, w http.ResponseWriter, err error) bool { + if err == nil { + return false + } + + type clientErr interface { + ClientError() bool + } + + if e, ok := err.(clientErr); ok && e.ClientError() { + log.Debug(ctx, err) + http.Error(w, err.Error(), http.StatusNotAcceptable) + return true + } + + if limit.IsLimitError(err) { + // don't retry if a limit has been exceeded + log.Debug(ctx, err) + http.Error(w, err.Error(), http.StatusNotAcceptable) + return true + } + + log.Log(ctx, err) + http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) + return true +} + +// validSignature is used to validate the request from Mailgun. +// If request is validated true is returned, false otherwise. +// https://documentation.mailgun.com/en/latest/user_manual.html#securing-webhooks +func validSignature(ctx context.Context, req *http.Request, apikey string) bool { + h := hmac.New(sha256.New, []byte(apikey)) + io.WriteString(h, req.FormValue("timestamp")) + io.WriteString(h, req.FormValue("token")) + + calculatedSignature := h.Sum(nil) + signature, err := hex.DecodeString(req.FormValue("signature")) + if err != nil { + return false + } + + return hmac.Equal(signature, calculatedSignature) +} + +type ingressHandler struct { + alerts alert.Store + intKeys integrationkey.Store +} + +func (h *ingressHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + cfg := config.FromContext(ctx) + if !cfg.Mailgun.Enable { + http.Error(w, "not enabled", http.StatusServiceUnavailable) + return + } + + if r.Form == nil { + err := r.ParseMultipartForm(32 << 20) + if err != nil && err != http.ErrNotMultipart { + http.Error(w, err.Error(), http.StatusNotAcceptable) + return + } + } + + if !validSignature(ctx, r, cfg.Mailgun.APIKey) { + log.Log(ctx, errors.New("invalid Mailgun signature")) + auth.Delay(ctx) + http.Error(w, "Invalid Signature", http.StatusNotAcceptable) + return + } + + recipient := 
r.FormValue("recipient") + + m, err := mail.ParseAddress(recipient) + if err != nil { + err = validation.NewFieldError("recipient", "must be valid email: "+err.Error()) + } + if httpError(ctx, w, err) { + return + } + recipient = m.Address + + ctx = log.WithFields(ctx, log.Fields{ + "Recipient": recipient, + "FromAddress": r.FormValue("from"), + }) + + // split address + parts := strings.SplitN(recipient, "@", 2) + mailboxName := parts[0] + domain := strings.ToLower(parts[1]) + if domain != cfg.Mailgun.EmailDomain { + httpError(ctx, w, validation.NewFieldError("domain", "invalid domain")) + return + } + + // support for dedup key + parts = strings.SplitN(mailboxName, "+", 2) + mailboxName = parts[0] + var dedupStr string + if len(parts) > 1 { + dedupStr = parts[1] + } + + // validate UUID + err = validate.UUID("recipient", mailboxName) + if httpError(ctx, w, errors.Wrap(err, "bad mailbox name")) { + return + } + + ctx = log.WithField(ctx, "IntegrationKey", mailboxName) + + summary := validate.SanitizeText(r.FormValue("subject"), alert.MaxSummaryLength) + details := fmt.Sprintf("From: %s\n\n%s", r.FormValue("from"), r.FormValue("body-plain")) + details = validate.SanitizeText(details, alert.MaxDetailsLength) + newAlert := &alert.Alert{ + Summary: summary, + Details: details, + Status: alert.StatusTriggered, + Source: alert.SourceEmail, + Dedup: alert.NewUserDedup(dedupStr), + } + + err = retry.DoTemporaryError(func(_ int) error { + if newAlert.ServiceID == "" { + ctx, err = h.intKeys.Authorize(ctx, mailboxName, integrationkey.TypeEmail) + newAlert.ServiceID = permission.ServiceID(ctx) + } + if err != nil { + return err + } + _, err = h.alerts.CreateOrUpdate(ctx, newAlert) + err = errors.Wrap(err, "create/update alert") + err = errutil.MapDBError(err) + return err + }, + retry.Log(ctx), + retry.Limit(12), + retry.FibBackoff(time.Second), + ) + + httpError(ctx, w, err) +} + +// IngressWebhooks is used to accept webhooks from Mailgun to support email as an alert 
creation mechanism. +// Will read POST form parameters, validate, sanitize and use to create a new alert. +// https://documentation.mailgun.com/en/latest/user_manual.html#parsed-messages-parameters +func IngressWebhooks(aDB alert.Store, intDB integrationkey.Store) http.HandlerFunc { + return (&ingressHandler{ + alerts: aDB, + intKeys: intDB, + }).ServeHTTP +} diff --git a/migrate/inline_data_gen.go b/migrate/inline_data_gen.go new file mode 100644 index 0000000000..f0dc27d5fe --- /dev/null +++ b/migrate/inline_data_gen.go @@ -0,0 +1,763 @@ +// Code generated by inliner DO NOT EDIT. + +package migrate + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "io" + "strings" + "sync" +) + +func init() { + var parseOnce sync.Once + var data []byte + + dataStr := ` +H4sIAAAAAAAA_-y9a3PbOLIw_Fn6FZjUnpJ0InuT3drzvE88TpXHVjI661h5bGcu54uKJmGZJxSpISk7 +3sqPfwv3WwMkZdlxZpXa2rFIAGw0Go1GX_f20MtluiijGqNPq37_-HxydDlBl0c_nU7QoogyXNbzdYVL +NOz30gR9-jQ9QR_Ppx-Ozn9H_5z8Pu73rtICXU5-uxz3e9dpWdXzPFpi8SSL7AfFIs3pD_TpbPr_Pk3G +_R5eRmkGPZvf4jK9TnGCfprNTidHZ-hsdonOPp2eogRfR-usRtdRVuFxv1cWmfxGFd_gZJ3heVxkRSme +1ukSz_9V5Fg9qHmf_uigv7eH6vsVRnGUoyuMPn66-HmMJh-Opqdj9MtsejwZo4sPF6RZHJVlikvR8ujy +cox-mZxP_2d2NkYXH8-nZ5djdPlh9tP0dDJG76ZjVJQU6DFKr1GFa5RWaF3hBNUFqnCekIHRbRohOmlz +DeIir6O45uinODLQj5COXToD_vdtlK3FDySBFr-LVT0v1rVE7Mnk3dGn00uJT7Lmc7Hg55N3k_PJ2fHk +wqKJNBkR3BkQ1zhaomEfIQ-8Ca7iMl3VaZFLaOQcwNF0AnRH7NEm4pUGKnlOIHzQbHAVR1lEgJ2viiyN +7zeeGvlR4hWOajQ9u5y8n5yTJ03Ac9Jclwucx_fzcp1JEv15-v7nMTqd_Tqmf87pX6ezX-fklzmLCpe3 +aYxDsMcljmqczKMaXU4_TC4ujz58vPwf36yq9XIZlffyt0Z4CFU4u5Y_buplNl-XmXpbR_W6kj8ph0jz +OE1wXs_JJq3qaLmygYiL_BqXOI-xMViSRlmaz_P18kojbgPnUfw5L-4ynCzwUnyCUL62CNG6LuYlrors +FkPvl1Ga1ziPyNeXRYLFriHvHAKBVtNpxOgSITlxscAGIp23ak8TuhBrfDH5f5_Id9D0HWWQk9-mF5cX +qjfDzrzCf1BawrcUDRqzuzyfvidTRUfH_zyb_Xo6OXk_IUzrfHIxO_1lQjp9On8_OTv-Xac90uJ09qtJ +aeKjIYbFFovjV3Kes8lvl78cnQ4HANwDiqzPmBMcIsz6Jq3Q9TqPCVYrFFUoylGUpVGFrosS0RHS6_s0 
+XyD8pcZlHmWouq9qvGQNEpysVxit1uWqqHBFVlLhhbNQ357oQVsiwXWUZhX674sZJYw4SwkexFD0l0a7 +PcLZ8ZdadbA2Bl9zjXdURbY2PlmX9_O4WOcGsWq0luFbnOnvOB-ASJS_EoS5OVk7LFSuZ1RV6SJfMurw +MiLWCifzK2212coRinO-SVDVgr07Ow6Yj3gHToMOtRkHXd0UObaZVHwT5TnOID41J5ROxAix7LfGI4tQ +tBmxh-jo_PyIgtQWMc5kNY4XmnJVR2UNTBjnbQ8SgbSr--5ACmpuC2xr4tf5PdBae91OZphXNV6FBYcs +ujc2ao1Xc5NRPnxX7u0RyrG5_uTE4PsnY8H2T_p7exFlr617jNHk4vjo9OiS_Hk2u5y-m9LGFxfT92eT +Ew9fyIrFZvuKTUdtIIOyNt7o4vqwPWnJkEfUPUScgHt76KZYlxXCUVWj4hp9ujweI7y_2Ed7_0UPq-ML +VzKWt5wsuqfi8QbA4utrHNfpLZ4nFJPirW8iN1GeFNfX80SjVgJ-Et2P0Su09xb9l9aKzFRj4YRX0NmP +0atXb17R5n_7-5u_v3Kk5KJm5KvOYvaRNLsfozuMP5P_FiWK11VdLAkh3KTX9TzD-aK-MQC7lq1YG8Tb +REtybFo95-s8rdX3fH1JqwoN6ZKNydQrBlQ1IuOJVYG4DH8XpDq2nOLKs9GmcFmHxH-Fohq96nhkWqAF +ZsZatGSKfL9673UwG23F8WhLAT6hItGLYIKitijVPk8T0YoIZGz0vT3O8yiiSswvH3o_9RQQeWq8KCM- +P-8imvTNlQ1HH5nG4PjTxeXsg7E3SKtFeotRWqMIZdEVziijkx-bCxFZKFKYpIwJb1xHGZGg6dhM4xIl +SYkrJg5TUCgEfuIig5GrElrgHJekBbq656JZ6_PVwVVe1Ol1GjP4S8Z3PRRhHZPtr_X8BllHMXgc8FcC +PEvuW9c386uoSmM6XkUXtPWX0ewMnUxOJ5cTdHx0cXx0MrGJgDSlS0yJj62b1HNR6TGqqruiTOY3UXXD +WonXHmgXaX2zvnoccPnYaQLCSwHSlYonxV3e75-czz5yAKfvxP3UWfgDuJ1G354WPt7iaQ6w2lYtGxr5 +XrtSqqdlcwtdZGqNC8ILPY3p6jd9S13bGlry17o6IqSKCGGr7eQ87RjTLrxrIug-0EQqHgPvPa84O_Hh +3OInoWb6Rm6YCt3CfM8e9M1N-GkltuDvHw105ms2R44IwUlIK_Mdiio0Ofv0gXIThAbrnFxW88GY_6xw +Kf6OkmWaDygjODq9nJxDxgTa8nxydvRhgi5niM-v_-njCfk-_UmbXEwu0VVaoEMyswl9RP79-vPkjD6f +XjBrwCV5MBjIBpPTiwlpQB9Mzk4ODFAYX2RPjmennz6wwcjHBB9r3UForwYDH_wUfdAE6AtzBhSPbqvD +wQBsQKdJGrSeJzOTAOv76WJ69p6-fvPGJos2Y3ZCnuwhsSfoyf1401hMlDE_D_Sh5M-7UBsUBKTeyDQi +NbWWl7rmhrVvVicnopGU-A4MkqKzpO8O0enl-fTD0CUqzQQX3ByqnaCdr1_d0ZT5zjeY2UwRqvW5ARp8 +_SqbiU-OGimFfrvb4oqPNK2EwoDNLpnM4vZWDEtnZiE-57AMCgC8T_wd6XbZvCfd7kxY0zY5edBpGHO_ +Ej4kBgmMomjaMfyGPq66Abbcdh1Nc2_bPsIi3K69ad02zoI23S1zOXiWGH3JEmidDpFL50Z7ndw5oSsx +wGG03SQGcZ1a4vqmSKiqxidAAE1teeLjp4ufhQBBb6HiBzW9ix8XHy6YYNEJNG76bgmdMJRbAB5dXkqQ +mK1fAkUt_uIXt_uLn--mA_eOBny24t8BXSzIC0Ul-tWQqxDCqDY7KBOi9UJMvAkzrHWSVtFVBnlmWJ4E 
+pLFx_ZQNtXsov6mCF1AP_pzbYyMKpfbAhfUVR4E54SaI3TUEJ7AtHAgkTH67nJxdTGdnlr33xWoRl_er +unhx0O9Pzy4m55dktjMvrGNKVWNKQ2NGGGNBBmO5wmMB-ghxmfZ0cnyJ0uTNGzIXfRBdegMo0f5GoLmE +gvupSCDQu_PZB3XDcucJ0QWZKl39sbvEanrw7Kx-2rTtD-0bIALaDf0CF1Z-qPm5csmzdg9SRExJVl8d +CWoT4dlL7iwM04XqZMee2NTFnjp0zNYH2hQ2o3kE1aRCUEedpI5KAC6NxPmndZr1oNDXQW4O1lBDmLu7 +TLL2NrLbuJiHJBPfSd6iLRNIHKHGluodvqjLTF2-J9sGtuw3EDwa56vJo20lgBZoNfFoOnOCKAreh5uc +QQ3u5ELn4AA4I_jdjn-Q7ebLGftjvkzzdU1J3QF8w7HFkPIbAE4bjntx2h8ggMDYJYA7ilgUVZfpYoFL +nEiNHTUBi19xVlQ4McVWe9xiXcaODL8oo-soj8Q4yyhfR1lwnKxYzKnXlT0UN0CJoUpcrHCufrN5zeOb +KF9o05B6avsNVxurB9AclZNPxdn8T9P3F5Pz6dEpJEuaxnFLlm42hQVlRY5fF-MOxQssc1FKb8yHYiTg +EoU7lKILczTaftynw0HOZV5xml6PVQ9lS5QN8-JuCJncBHVoMn1gMVhz1obAo2FbrGcA2YDLqQkfa8bo +FCRec-2XuKqihXXF0uf4y3TyK4fZRmeFji70o5qBv58m6OhCTnOMpG6NXVrWeT2sarzaJ7M8ukBsqLHR +ajjkYzkr-BK9HqH_sIYZkR1ZYtLwFvOGcjwqDrDhxh5fJ_J_Y07quq7wfEJf7YOOTIdic4CvjekcnZ3I +xrQnn53aeLL5-_PZp4_op98VMi1pStCItqfH2g4e23Q85pvKEKq8nZUtfSw5LbC_GFKVJc08Di7qqKbe +yz_hRZoLQpqdo_PJx9Oj4wl69-ns-JKcEYQq9XEFK5yneYXLejhC55PLT-dnF8KZS1DcX_4iMfbT5P30 +zEA3OXkmv-5zUA8lA4UmcvnzxOxMB7DxzXe3omm6lcZi94zQL0ennyYXfP_b_wgwpBMIh9yYYzQ45oze +HmB0YDyanF5M36HZ6ckznKI8_LyTPOctyJ4dfP2qVoqJ7y0mP3my2VhHt3dOFwzT61VC_Uyuy2JJ5qaW +iM3t69cBqouNZn12gqbvzGdsaxBg1XNqVUN0g6DTo7P3n47eT9Bgla0W1R_Z4ADeppM8efgGppyDsMCH +bd3tLaTadVw882871gDdptEbuThUKgEWZ_T8FkEIjGxqlYb6X2bTEwvvJs653p4fKpExtYvJpStBHbqP +XqLXzqmDDrm0pA9IDw0hUVZjn-cxPY29Ikd2mwE9ETa-xM7uoQ3US8REBnqHUVuS324Gozdv0rzG5W2U +jdCPwAQQP8sjjfEG5VCwd8VlAOj4N9qFRI9moUMMk91m-1LmJJ8NfU53xzwkYphXAiPDsiWBccQlnQ6A +DvE-jzg7RHuvCaU73_6r9ln0IxId-J7cfN8JeZ5zKp9YQgc_ekfu0HzjzM743qGv3s3O0eTo-Gd0Pvu1 +j4S9eaid1NMLdDK9uJyeHXM9lToIGBonv02OP11O0Mfz2fHk5NP5pFlGOgjAL7iyBjlnsQ2Qh-BwWL3f +i067PRDSsvRvisM7SiuLSwP9fH0Y5_a9pCgUo7WXQw-8HZqQ4cQp-9z_AhYhecNb4HxeRnlSLOfrdZrI +217Y9xiy3_iiPkL3_BbO4u2NSi1tSfwmz4zwQ-9Exzp0ShfPvnv88-T4n8OhDv_0QmGF8B4BGnccGRHu +M9Qf6o3tgUgHV-0dWGcqx3ScCmzvCYwieezx0cWEcSLlNX6IBq5jOPeGER7o1BWGTnpydjJuGs70PDeH 
+YtCaA_alROD3h9X3fKBVg9Xpz-7L34rqGomOwzpW4HjMMP4xABIBtgqjDE7W3GRDKUNv66E473AO-TE_ +LojGW9Fe1Y74bCW7w-x1_bpg8AYb1Ll8a5aIEA9mtKIeXN2mUlnawYh5PYeA2-D0sRWa0HHTQrP5mO4M +dLo6psb6qSIvlw4II1cTygLQHLuUoQilq0fk36hS5solf-B8xNRU0r5jeIrLsddqU44tWsstReZSGvwP +JXyOkrJs0yhvN5K7nocUB07T4bDcN8xMzFPPczNDL1G-r4hfXtSsdZLXsrnyMHP01XQPy9noixXxxRKk +Ya6R_2hn_2242mpXYHHxDt53rZXkYPsPqUPEFeOuEvphV0rPNZHcyUztu9ur4c672a1aKcuppsdcz821 +0kyNZ7LHsFIFOYoVnwNEJcXKkaVskZxDOn2ANGz0kgjg_YyXszN0PDt7dzo9vkQnM8JIf56evT_oy0ac +UzoOQBUnNAEMYcHTMzRsDaPV_1AKsd9ORxYliXdBN1VQtlnXzdcWqQ2vtDdMqem0O51-mF6i187zRhL4 +1mSAtq5CtVQhzrqLa7tXo7O5hgSgsYN-X6ALZCqaYEaPrRzjpDLPdyK7WAcXwfa-UolH8dI9bccoXu4z +l7V4uc-dzaJ90_K3T_3tokraAGW8A7OawsJOFC_HnhUnTz3CC-mkjjt5BvY1Oo91IYVO0uZr9Jxo1yxe +8jMHwo7dUjoIH6J3R7qFiR1M6kuOQNB0upH3mh_MEOL61o7menJbQgc03BpngKEzpgkIZGHkGHPkIrSm +DJHt3PsF5I-VFfHnB1wzeE4i-17w3dxBIM8N-L6mX05005fnerLhgfjHOi0x6DhHFmo4N_E9RnMLx2Nk +9-NN1cFKkeeeqieT49Ojc9OKS74p8X3o0sBB4EzWDzA_4Tk7Zt55t7gThg0ZaqlfosE_5C0GMC9ZR3GD +H7zYQpTp-8lirPbKCDkQCoMpx7i2sNCKkmdyMGcsttDTs_dkQ1Oo-aiBeUGqh4YJjToCbaOVSxgSNvH8 +qYXQEmc4qjptOvlXtY5jXFXCnbTjdaTrNmHkniZsK0igDjVqMIW46TsBofEY9D3hkhZACBeTS3nQcAuy +BVALZxB9nsA3guNZbhabE4jPINZwyTzQzWag9KW3AGUww3vdnf-BL66jUZfJvqH8VkF9ZtdYoa0e37o0 +MVS6HD64e1o66T6cKbo4AdMgPUCscUPlZFphFHLd1eIvlXyhQkhNX06D_yqYVWyLcS2QI3mMTkCXwdES +l2kc_fX4Jo2jRTHg208mG3Gdqs28WUcXhks1S5slHKBpLi3x46ZYl9k94A0tBnzAYuj2BS85Gwj00qIv +BLLDgspgSRNVlvM2TWrIkpep7EdBydLIP2bvONH-tSmFGjZJMjfX4Knhn1oxJWBmAJV5A9VAMQyfPnun +kQJODGzghysMjWdtv8oNlQbtimw6MLHNV1FZp3G6ihRD3IDw5HAhwjMQ7CW8VVGllLr0Zd1CVKdJDxrA +Y_nJEZnm5PycJX05m15Oj05Pf-cPJydeirGQCI4NW8KdZG-CUDQN9d5rK5gOzKhEGf0neigH9PvSjlgZ +sTT-DmY0jeFCMUNFlsy1Rwctx4QSDgBuFU18Sqbr8H-JymPaJw5tkPmGYTKV9U5XFbSdmh6H9QDsbOgu +0mkBnsKpxLRCN2UCC-T_0g5iQ99Y5PM4yjJxf_h1evmz2pUiR_SRrrbiG88Qm3Xbv3WMWi_UmWA8N34M +dfYsvDSNbC_in_RCORRSAcT8uYcAy1rqOtdrgzA5IzRGEt0DQ_CkMTSt5sC-UOi2Szqz0NQfPE38pS6j +uB7iVRHfMEd0evLvKdSP_vr3_3r16iF4EB8h2PB8A8ZRi45__T82Av-q08PozRsusbCnjMX7MUoZvjw4 
+6avRGK14InKIslloUUkDi0yzrzEwfHaVqzHcSuylxDLmlqt9_ew_RImw7wi157osySVO-wwItq0C3x7E +5nuBulXriYh_1C1gtS_lE9JEX0T0H2i1H_NMt-yfbtcS9yX85c-JjKGJjZfUyzqIEF0Qki2SfZD7EgB0 +KVO9iYX1RD1SPhEU2857Aep_miCPXib72nUEYvaypztbqzfOE6uv2UNaqfptFgvaRbE2YZuobLfC9jq- +ZXGLFc8U6zvUVHo5viPPiQgc1OLZavI5EYh4R6Wimpd8ZPbIVPtx4pAytE6UaV4XcswxG8fglubW8KnN +pu8UYG_V5BzFH5c14cGJnKltBfnnS_Ra7ChjO1FgyRaSLX9UUBjP3x5KmDS1nq023Ai4vdbAvfUA9yMI +nKWD7AKbRD-0WltSZnZRd1ubQUE-bGUs-j52gK5v7tbzychOfZKbQkjjB1OFu_T6nQJyvKP3C_M5GvZ7 +vZ7yyNtXetu4Ns4yxylvLF3GyBD0LOj1ej3pZEB_KWc79dITVkYbhDz3PA005z_SggeOAz4BpPuGbnhG +10b3O9FautT9EAwRVu3jupVfYa_Xs13PrCt1xJqN-r1eXywv6RatleMK_y1SNvR6PeXpHa33reszu3oU +Ulwh7emlQjWlj85O-owYDEKL1mN52S3ivlilIt439RsEHu3B7Nx-wqH5_r0LgW1q9BH5D7oHOn57r0SI +BXWaj6G_2fkvdqce5Pde5G7EmwXQfnNPx8cnLfRYPpH2Qf39OBtCSP_3dDWkb7punp2PIvNRbPScaBZq +qaZACN5kpxCRtt8zFIpSnDUMWVKvKK1lfSW0KtNeOdZ_2-opKbiUbDlNRVNfE8uMx6O-kMBMgWfc75kj +sAdS4UOamyqgXr9HJqgENTIgqMRDP-oKLjIpKsBpz_aG3s4jKd21bP9SaMq4BDhC_4lIc6FG2iNdlZYZ +vUTGA0NZ9Weaonip6dO-m-m9dEAzFIB9tnnMrePX68o9Sv7WrgJsP9kK25LeX7iW1veqiKFXYgtaW8nZ +aNbkCIQMA-N-z_BQYKlweQ-NTnva-rIPSDUHsNGtrQyXEfCXFvSWBYMLcEF-J0bZPLhkn1Vw0Ou81NF3 +iRx-In6ZySHkv0ZHeikjRDOkRrbBGNX_2l_X8by4vq5wPbISyinjejVGqwVdg3_RuqnREleo_pdlEaj_ +tc-z4Ff7WvmNxiKJgQzhbdEeKpvoWwWrdKL5UFVKhJYVqIdIn9tVD92ntKJhx1wTXgoROKRh75bXkLHw +2lTHxhRB7yLHeciBP-jFxEmQbGILjMlvl-dHx5fDk9mvjMLUmtLmI4hE2RtuIx-8GXzVmjCvc9kISkFt +TkUOqrt3GPmvTGNqmHpVTUiQczRGe7Qkbqt-ZNtEIxaVtSwV2Ybi-Lw1uMaAikx80CAQwb1fvta6aGcG +Ww3CuHe-Ryp3xM736El8jyirkMmOaKyA4WHEJSrjGSiMgS3cZE4G1wV9mZqcs-u7NEuLebWsKGRXUfy5 +GrJj3ChxTv6ZjrqiDilthnizukBXmKuNeOCoGJbgjf7z-wyDQS-Ql6X49jpP_1hjND1BxTV9oPdms4iL +BBt3YM3jM1PFVMkFV0ziLq1v0N1NGt9QCkHVTbHOElTialXkCZkjLa96k3IrCxPJOCLFLD3Y4hBf0taI +tC5xvS5zkRCTvxjKp3c3OOfgiTTEeVGjBGfpLbmbo6t73omxSe34GOorOCaIsKLmjPdqAjwXiiP4wkUT +AQICihVCRHdbpDHeFtmx9aL3lbuoQssowc-d_E5h8nMkThcLuhu8OYbW72FkyUgcM4y2oVHasC1phmmv +E-VZVAQVynTCKuqbsqjrDIOFqwwSAEIoRGfenqeOAsZ1LyNm4Bp9RtNvsjGsAIVQoiJdtFLwsGFUIKA1 
+lcbIHjGSc84Y82qoFQGf6XSuRgW6NDHqzzkirhDkNhmbFXvwF7DsPo6M-DhoM1tdxjGEXMhs7TDb9gav +Chtmej272maWL8O9U4N89PI1IzaWilNdJzypfigSmNDpMeGTgaBXzy9zcJrHpQfL1Zx60mW4xhsi3Cvh +S4JUS3OJTJ8GI3wDMCp4MD87PWmfb1b_4lueTl0-gSOIZ6cnj5CjpN0qsC1MU5XwM1ulfoaQ2yJhSdvl +19M7cJj923Muc6Fyd6x3s_OJllPlYSCH2IL_DGjFVRrjX9f1zbwuPuN8TkSfB0R7GfoJqyZ9SF-Bv6zS +Eldtcv69NPIOTM8uJ-e_HOlhYNEC5zUQKhqOj7VQEK5e5Gbp1s4OGkNoHl3CUOu2aihl5H6I3-XPLi7P +j4hY2vzRcA97mnbM8FgWwxhDpejHRi38sV47fwyX3AfWgR9gryExsFXhLG1KIhgT1j40jaBVwmvVkzZo +EopaFdryFUCDlm2DOXpG23S-dDg-YT93ae0_ufOe3HlPfivvyX6vdzp5d4n-ezY90xwpaZK0teVDaZoe +DxqdKgh193ahh7vQw3-D0EOmBwyEH8I-Q984FhHpXr26ZxHaOFBxbLpcPMLsthGqR_m64wACQ466xzhu +ENP474K3xnDIXfjjI4Y_9tVCJ0wUi521o2tse0Dl-m_qUBm8yFgiMVcZgVI1uTywMuCHaPBy8PUr-8FL +PdET4hANLj5cDIicIX7_MpseTwYjSmCsA7ntnk7_OUGDl_8xAC8dbaGo1ldVXab5Ysie0FPlb6PuIPnA +Ucal3R1hd0docUeQxaW-48tCF36hX8WJpEtepAU3qmn3eO3dXFHL_Pozvh_3mUtLm7b93rvZ-WT6_ozZ +-dTbUb_nFk2mzkI9wPUlrMx6lvPoqOjb3CQGLH-jyVOf6Wd8X7mmT1ntGyiFYvemHTordeF0byqFGwyj +3tauwg3b8eWKwAm6hPsDd3HVFpZOXLet2hAZOjO9J99-7LWqm6616fNywHJA2LkYtnU7gOgmWtUMxqIo +9uYQSdD4FDY4oT1Xx7qNYpy3BRecpLGI24rYbrnYVejcVejsXKETKND53GtzbmjGfrCzAKeDNlsQdfMg +eI7-AxvEg_o29KNV-edDART871Xfv7FGSLNQ1MYHyJYmPfcQaLqwiNl1AEvuBD2GDAnUadFBpg4JrGtP +FTXNxgnEOBtFytoty06G2MkQOxnCJ0MM7e--fD2y5IjDLQgS3q3J0_3WBfcC5o7GNlTPRglH_nKA-74U +cxCd7pR2T6u0M6WwaA3R1CNo9XJ8Z0YR1GUUf07zBaqjqwwDBaSMEzq-j1mhhZ6TwZxrPbp43X3TOhUO +u3VTtXco79up9sVRktCFkJ71XN9TuMtDEY7iKEflOkfJuiRLleBVVtwTNisEKk_kAQ8-vJwBL-dFlhw0 +hS6oihA9y9l_bD-Z_40q9nQJT87P0ELygAbfJz9dTM_e80ZMcOBcwQoSCIUQsLkJXVe7KoSq1OCB5a8K +lhRsV0jGvBJAsUyOuxvUaK5FGVEh_gAa14pU8Y1sNWsYG4hassd1m8jCUG5ZqPmWvrCi43DynZ6dTH5D +VAcBxlhZQRVAKy2klm65uUxn2b6jc5fxhT9Dtx5fiG6_oZJSxXNnaKmDWG4RYbDRak1BJav7SmCA8v4A +JaT7PUrYVL6iABnilXWcagKUOv-EqARNhx6xcEIpCWosxF4ItnG_x08pEyIHkLF7RtHS4Hz5xfmrptjv +jfr-sll5n9ziJPEconhfDsUmzMGKx314gku-lLnsKMQKlumEJ7jKoWmz9ypNliaBsHLhmrwXq_RP4QzL +J7P-X5LiL312ke0pO8DQJC5vgS89FxQNgKYZiXvnR9OLCZr8djz5SLnn4LpY5wlK873rLF3c1HZSPDHM 
+XVSxcQYH_Z5I0esHy1tfrQEOlgRNpMFCrCctXFfpH56cnTD0eC9AaovQYoTUxVKjyErL6KY8YMgdeVjt +8zlTvR3_-5F3jhKbQcDY1lLeMfZ0KFim3FxpmXfIL7GOe2i4jL4Mc7Pi_AhUE4ioiQAeGDTa_AE-l0sU +6JXzK00oB2evZoAk-IQNODUiQSnZYDZqUFvyHGuCprT0qftIJdkBYW0aSg1su5fScb9XS5gZwjxko91Y +9fWtfYxK5GuKBDVJfsLdkfxERtPn6czI7lyDTGx2fjI5p4vDSehkcnHc1_NNAuK2mYMyXP6y32su6Mm_ +PZZMfgwe-XIJq31GNzo5QcPuy4G1pUb5vjw4PMvmp3V5jgAnOgBDs5TBB4QEHJWLt0lS0i8B8oTzB9E2 +DQFC0204LR4HGEwttC1AhlTXYoKNuXMOvDYgD0ehGhFM2a6rtIgN1puX3DYEzos10HhzzFtrjKY3zEuT +P4fZ81vuys2zo7GZH12g63W9LrGCSaR11TRxEIxCowZuVcHKYZlGKIwMUUx_DmyKHJBz6dG7wnmS5guG +JFdFw88h7xFAZD1DIejds2W_ZzBVY0J-TTh_qZbTlP3Ey-8L8f6dEciW69w8Yp3zQt_ZPGHumOM0quAN +Rr7tnvKxYu8oTDh8p8cVSD5Apt0gZZG3Kv-uRWZVJzoD5kUVyIDyUs_JC2HfIE4KiIccfYYcCRHfo_wB +ofO42md8x8g3G6zl21yLd1fWfVfW_c9X1r1ZmlXnRdtphUeUJ4yPmefqoDH0FmqXMJVGSzLq9XqtEtM_ +lwL1gC6ZixNtK1T0er12F9QWl1N9_Vs0l4sL1SZQxkn4hPGvU6_X0wNCQFGLAQo6UnSy2IbPZF0KAiQ1 +cVzB0ph4C8pPbJoOw_XOlXte0GENrzjvNu6FRtXnJ1XCIKAoPEVRHaLX643QIXql5D9XdvjhUCrtnHea +T5levEnBoXP4zTdpq3tdFy96j9_1TpTYiRJ_blFi1BFoG63bP4ANbwZrMq47gm697-SR0MFPoK-pkp7e +aN-3FVnf2kKvl8H21J4xHZ_pYvCrt7KY8Af-fS-ZTuWpVrMc-02kY2Rb-sSwjFnpdjcBnxtY3aZR3m4k +iOktoabDoaPDq_GX2uOJiV6iXNPe6dxPXaI9QjuNMlatAOmP7tNZlqBbXFZpAZ16IYc-nzufRhu6Rx9_ +Y_v1ice257x8Lp3OkOnVzoRDutl1hz4k_ZG8Ln3I9DWCnPrgJqZbnyK34RDQhrBI5mF37z29Y5PvHm8r +NUQ_vg157onW7fz2RiM-SeWc3-igyXuMTDJQrgemGx9ifnbKk488oAlXJHbP0ND16RsBTn1IZDQx_Pr4 +YUUoy3xICWnoePKRVz5vvuGwwZ9vNLK933eF7HaF7J6gkJ3i8sGbpKFX9d1kD5yclg1eeizEgibg5AqB +2bmW15Ixa5aXozmPJejYt6souqsousGuaIoea-fxaNjG27pJeq3hXQzgoXsdkG4RkJONKUjrd7u-vmyN +DZlM6Vs9lb3Zvbe3Z15seOhL4CMy_WnD0M3-v1Jn29KFtMnZ1XQu52m99ZFnZ6e_e3x8uaxhfailPzAy +4gW1t6NwdQOfR6sPZsPjOQhx2Dd62_DSlWYexsDSpGSHfLGusn4zNVzQVd5WoLKu_KWwVaufzGItry66 +3VrcVZj1-ujCLPeq3278BV95M2_ZV_4-YJJG4nYEXZ-sO42_yqu8RzQ0CBaAVW200q_XUVZhealxq6Py +V13KvYrrlPeGRs63ofBXlaXxHLraN-8OoDOGjj5gBE1UhmYF9Ghd-NU_BFD9dUT_dUgPQtXrOus9RIOB +SISmPZaeZ8GBZLjjK5E0jz2A_dZ8h4ws2KA-rxsBBoOGAhDhYZTHWpdR-ER0OF51AkMbQINA-biznUpe 
+87KJchnobxv_eocuyxc4gIVwQz4XQBTYqe1ateqsfTicIDuUhkkXP8xIM_c9h1Z_7Ugj0BgBkQWYtNaR +4tg7rHr7CIC1zMm9c4t8Ft55379bpLjt8Saaq-6PyFiol6jbcn-_DpfeKsSdNyECNLGxI-o27kTeyNLn +QzuSPB8Oh85KjZixA339igaacZc8G-lWj7doSPco2kNDffVJf2khpjX7aFJYWtd4ZO5hCgKV7Ni8yLIH +ZDsPGWxL2ovbiHomyXSTCUEaIv8s1sBWDHC9QbpJzs8hoPtDiFOYFwqbYdjWi5aiPMxDdGn-33vNu6al +NPRENY6WnoyjzUVszDGcssHkOWrOm6lLQrAKa-y8q9bLZVTeuy_YDd1pjrNr9-lNvczm6zID2nMTlf2c +JtmRRWwkc3IbxkV-jUucxxj-QJJGWZrLmvf26yj-nBd3GU4WVL9Jv1SsATxE67qYl7gqslvsb6XV1Jkv +iwRAkJzTulzgPL6fw3h0mlENSKNIbsi6DRE61MxMfqL_mZ1x9w693DJbeK2iuk6IZq11vRvOruE3ggg8 +_RinAt95iKH9XEwigT9iEArcxEMsqhC43hagF7ChTTLop9nsdHJ05jQEKQcGFKYeBKS8NWtLUf5C2P6Y +_jm2cqx6JSfV21-BnZceEHlr2U_9pkRLv7slQgVITSM38sa2tdpdGDQsNEJhrok1XSufLBuDWSUCZegh +4raz97pJez1uXE7RNZ6bin2Z5WqJroiYsbeHltFnXCHWMi3yv9a4qtN8gXBUpbhE9U2Uo7q8pzl1ClRi +FjAUZTUumf8OZsOa6LSPuPQhte3cRMgGQm0CA-v-0pZCV3YGtHnl0oSnjOHmE2EFUsHPvxa-d8pnBWy3 +99rJ96O8YrxpiMDlCLgAipzP0FfGOpDATuK0-RzWeztIEtspLlb3KInqCBW3uDQisD1dxzzDtRGDx1PP +CRNrmrx5w3ydvW09aV-tKHAvtco8k8bKearrumC17cra9wMpap2wdUEoHky1BLBlTx0-_mm2rklZrNBt +iu-qZsfAMWRA8uactLSUmjuHnRKIvXIT8CuFo34a_P5xwgt9Ut9V9YqjpA8YG80vVLB5UbfvuMdNIK1P +OFUs9_0DZ-5t7asFC-DG75Bno8rbMoC5TSFEDZlpedtRiAdpm7gZ8XYqexDfbr77x6M537e2TX2G37XH +f9XQ5UmdqUyZbPrPitfNSaf5APb33ry5ShdpXjt5p8kYVgpJZJddYmOqb_iIgjncilYSTWzdW-SiDudZ +1QOVMyj3NJh3Wl8KbzJTXzpT8WnIAxpp_xw_aOOldzWR5hNtP3XUeuKV5TWt91IYt0YLuVDrDdstLNxS +96cWrfy5UvWx2uT23UIa4fY5VO1eLT2y3X4tc6qKPl18s5FdHg72y5YksHbozCmG2Tbpqt7H9tKWzw0_ +bQMOkLKhXKwo4L_dnI21ncMQhEHFbSsAk2CqC9VFuBFpD7gjkUSB6UokHsPORMZ3QyYdDWY3U5X2UibN +QzaXb06WIWfgcB-vl5J_cJU-Q2IAOjCARBrGFuucwLxTng2jUyjbht6wKeeG3jbEG638G_pjOwvHZk7V +rUqKdAlY2F71kOoJyoc8SbqBNI9LD5KrOa1kmeEab4jvYAWoylqaQDmovdf2QeBH_ez0pH0-fP2Lb2lX +7Qkcnzo7Pfl2q-WvStEqwgA1FKJAj1GMAlkFKSp_RYpqk5IUKa6sohTocQtToAcXp0AdClSg7RWpQB0K +VaBAsQqoWsVLRKsDh0pWoI3KVqBtlb9CD9qvtJcTrtSOe7KrPo1a4ldxtZVBptgieKkt39ZrLXKg_ccq +6ZjmFS7ZFemnybvZ-USLr3ogzKHz_MCsTcg3hI9RAM99tYCoRw-6umfWnDRfoCJLuD2nwXyhNPLUk0ho 
+y4eOGl74GkktuWMD03XwxmCDgdVXU-nT57omn28AqibCpTLFyM790QFoXLBmotsWzO9uDidoegD5FW0e +0uF5TI6eMOptLlIW3TvYfe2UhVDMkLd98GTDdqVhCzPE6Al0891sOvddLDoETYG2sJUkpRGkzTadB5h0 +GFztTTqkfcikYwOsGaY3M-l4EdcEn5DGNrW5VG0U4MyY7yjAbQeNBrNOF633d2py0RAVMLl48fbtTS5P +YHF5FIprMu9sh_YI6yxxVRclpuIH49DtzDDhop90TmGzy9BX5HPktbp4zC56nU_PkadlNkFKZ9bXHFk3 +rPYp_VVDNT5H5NaxoZ2lwcbyrIwsrewsWzazbGxlaZO5BriS_hly2igUdLWdIGY_aTSetDactEpto_fq +YjrxmE3AxDd0kAfmvWlrPAklBWf3F6-1xLCUmFYSwEISso60sIx4rSKWRaRFBk9hCDFZQ9AA0mT8UEHb +bnxF9dAAi7a2DivM2xObxkM2QvaNphhtZdJQQR7SliHiHfTD_Hqds4x5cZHgXf30J6mfviGSO1s7Hozw +oAnkz2UB2f5y-c0duyLcDjXsinDvinD7c-B2Mli0NFYYOv8H2Sg2sFC0tU40AdlglNBsEp6ZjKUqDbRN +0tSoQHDJzvq7s_5-j9bfBn9Evfe2DL2bsEf0MGsvaErcbdndlv0et-y_pcOGfeTqZoiyqKNaJV_8eXL8 +TzSsbtLrep7hfFHfoLfolR0U6kaWq2FsE4Z8M9dHncc3OP5sFwjh8kCOcYLqAsUFLmNM_uK6-WGaVzWO +ElRcowRfR-uspn8W66sMo1WJ47RKi3xEuixwjeobjOKiLHFcI_Z1Rgj75leWq6jELKeJ2EEVSnOju8x4 +4s0HI7R3epiCmPw8wXWUZmCsgkE1Elv7tmJcvfFq1rUmsnit2WI4HGqNtPVQKWKM9o7-FEkdqhqHBjUf +osFNsS6z-wFXpQ4Q-V0NWndPolTvnUT3QGeqiR2gO4w_W28nZydGMpujC7bkNgK2Nb8hzXO6isp6OMCr +Ir4ZjNGQ58-JNHpB1T75e06z5aA9fWC1Sv4eI2WZ-iv6-3-9ejVChHYZPyuuGZI3w7E2AYrsx4bfgJt8 +EV7doQ2XsOw_Nnb_j4VaSmQ2jaG_Is8OGkmS41xGdjV895G-US1jFd_Ywq0e2XE5IA-gTXn482pO1ZnN +bGa1L1mTzUWYQrSU9lP6u3kudMXSOF1FeV2hcgWzJskIk_lra45DEyh0SNrs65m-peHIbKkjIF6XJc5r +HZg2yACtmO5HtowA63N87VY2XpoRg4ScYTc0ewIdytX-i1VRpaTFCyIfkaF1Ikb_gVb7FLARN-KJSHv8 +ZYfmjdHs4pkLoTay-wA2E1gESLhtDdzZsYt8VWPn6IItp9NkOEwYlOg_kfhTmEkE5xyhlwQiyW0ZHwQE +EGMwazQ6_fCQOE-sAc0h-g7ZuMSg4QNgFLGGG5u6WakjQSqJSKkGLjuzzlk0khttfbXxdrJlUPbiF1Q5 +dfLakje5Y1_bgZlEFBxXE0LC8ijQ5smk0sAMXElVSlQi3SO0uqMR-isaUpHzzRvnirU9BHvFvTBwXYXH +5qn-n8Z5MvnPT-ZObyVehmRDBdNOlkM7WW4ny31faP5TyHKuDm2LUl2rwf908p2t2dxVQe9UBR1Z__SK +VVpRjV219F21dOPdE1VL35ZZ9N91e--27W7bfuNtSw_lncZlZ83bWfN21rznZM0T_2iSbAgFP-rOSjt9 +0U5ftNMXfT9o_lPoi3a2v53tT_3bSaI7SXQnif4JJdGdbLmTLXey5feD5p1suZMtPUWkecG0uzRLi3m1 +rOa4LItS1Fpa3RQ51svbWan-aOP5EldVtMBQg2JdL4o0X4gCdfbrmGLGql3mlF9iWg0VBjs9O5n8hmZn 
+ENw6yGP5_bH-KThwRS9pZw_bEn-3RRrj7xKDJuRbxKE-sIvFvT10lCS0gBxT6GfFYo5vcV7TongimOj3 +jxOkxaCpRueTs6MPE3Q5A1_PiyxRodPeQY4u0OTs0wc07PcGvJbeYNzvDUpcrHBOfxBMD5jBah7fRPlC +PY2qKl3ktLSj9UaENsoHPG8S_-Uk5BEvSlytirzC8xLHOL3FyYAsnJODkMygMqvys_n4Z8rz55G_WaSg +Edon2x3wdfSNw_Dqj_36cy0XzRz6vLFvlex1AHwhy5--4EkAPXUiATITY9hxpx3GcUsoy9yEeU0ZV_ux +6puyqOsM61CxnGrs8tVlKOfwYG8Nltp5OIOTPmhAICXVPCvizxusar852e2B1iRoHbZ2g9c-zZdY1ovv +PHUg4RZroxc8bZj3plX6e3ZJmV7PLNLfWKDfLM4PFeZ3KvKD0dvTs8vJ-S9Hp-gtO8T39Br8Zpl9BhM5 +-6dnzCeAz8JTXL3nr6wuvBsg07-y5ZsF09XztvXRrXr4UC187p8RLoSv-26ECuD7XTb80e_8JVzm3hDC +yRQVZf6IjIV6ibott_rs97Wku8JNGvkaL3eFm_6MhZtcKfghktjs0-W2hLHAUJvIY4HhNhDJQqNtJpU1 +j9hdMAuM-UDZzBr5uYpnTQjYUEKzZ78T0p7zib4T0nZC2k5I2wlp36-QFlCVsZgEBBTZWBQqamGOl1Ga +za3KGlztN_lwND2lSeZUKShIO-p8FrGxzHIb8Gd5nSM0pE9GXijoJDgYzrw1ZbtW1AOR4anHxmCBc1ym +8cDR4zo1QGh7S5sLNvLodOEBdc3uooyuozyiml0Jl6UGt0cxMUKHbPok08mSPw2VLNjYUcwGZuxYNlBV +rGnasjCm-Z5kbUF1OXsX1JXz7j50LqN8HWVBzPIiRz0DoXxYncbGcBMPQLzqDP0BaMDZC8-Qxv7iM_Bo +ynUMeSKy3hfoKoo_o7pA_7uuasTRgygVDTlW2K-7NMsQ-2B2j26iW5Z6DtNKYiucoOu0rGqUXqP6Bt8j +_CWt6tFz2kHf2a558qV5wi33nW6zzvuMZ2xM0iq6yjAq8V6xwnmaL8SMNwuKvM7nq5LZwJixj4Y6WJUI +iNT6l59mJ79D-e2n73gGfyEUcRsfdVE0HGnOj6YXEzT57XjykX57EEd5XtSIGQ1F8GJxjdgIbGIDIy8n +mr5Tv-1KDjRWTcJpJ-lHv8xOjy6np5MOmcvrcmGhpy8rlPK4ydk7JMW92ZkqZWeVJnVzgAOYF2m74yLL +cFyjyUdU5dGquinqCt3d4FzKwSVGdNuSS5fpNuFkxRWZc-VAzIlCXod-mr4nQpJ0b9BKjPGvhYpF6klf +9WG0t8voi-8VK34pb_pmC57I1e0q7qOfPk1PdHCZBBiEVivyY_dWvpvwCH37igdjOsWV4UAuUQB414HF +NMZQoZSokgMZDnaNZc6lx6U3ja7uoMbuPlGlBSLbqX3HaLkvQBk35kmWSysmwKtaiqy742BpKvfG6N4D +wUTP_tdGCml1DxSLtAz11OqMQVfI29i4RGpFAg_Jy33rniUSHN_G4RTHTWmdlw_r3ur7LYuPEf6lB8g3 +8iL6kaFJbJLOFJXplCPoRtKMXqerr5ct3MaIfRUsDe30g01zEVznEgkufrofwZ1xLv5tE_cWb9veGhgM +z891V0VmeQSvCiOF-dnkV0akjyhAQBKEf6WZzoHWPWFrSB_oYkSrGucN1CTEik8VtuWJOMridcZKlJt6 +02dSe8B48zjlkpolppaVj_C-IulwdaPWaXgo3WJXEam3wXbmfaCgEJZnNgxQ6_oXaqDt178Ih-VZlVOV +hGU-d4WsCo5aCIThQUp4tEnJVCcGwpVfEFQk1aFAN5woVJqBqXnbFJqqPHRhKLq1-qSuxOivEoq6VAp1 
+C4VurU4o8tcK1RmAUSvUVyq0oVDogW519tQnAb18W5Zl1hBvicTeUr_hMs3aAPb3vHWa4TLNkJzeWQyH +zDEPKGmimzsyq2ozbQRWbX4eB9-u6M6u6M6uTtZTygnPosh6c4X1assl1m0mDLeEaqwbkc3endKGrLew +izbbET45p4lVdFCKuKq4ltXYd2JWk5jV7pKt7tMHbjdTBe80bauwAZq7yn0tkq9R4j8ICZIe9ZNdcYyz +OuoMQQ1S0W1UR-V8XWZmVGSDlwUwKPfssEZtgMevq7YdRXz1WyEuMNc2vHAnIXNm7h1DWMVuKNdPJu8m +5-cUxOnZ9HJ6dHr6O384OTlAe3voDOOkEoZQfI3Lkhrh0hwVZYJL8qbE7E_GUnFWYfTHGpf3zKJ6HaXZ +_sZl2pfFLfbVixWmjDGa5_huvioqND27hKRPKnueTI5Pj84ZZ5gXWSI6sLN4jld8uIO-LqGKm7SuyANZ +dZrXhRx3zMcLGSr0DJSyVPv0nYLtrZqXZlAMVjgXrDpY3JxIyX39HPMcPXQKjjZdDPKjBNTb5O2hnAHX +Mp5KzryFaextYxpvm6fxozMNzRrbYh7uLMRw2l1PJwMuIHYVD8Pc62lZ0JaGU55qjbxMnIwPYR3-wPyd +N_mzdD3-U3uTH_7bupO7nGy3DZ_zkv2pt-FuF4aPwl28h3y5i_f408V72FByL1Qy7ruj04tJl9Nqt1N2 +O-XPu1OcCKF-UhYrdJviO2mgoKnpRC65A6CBpikFXuuPAqpB1cgb_qyaNIU_75IoQ2bMXRLlXRLlXRLl +Z5REWfzblfNwrY67lMu7lMvfN5p3KZd3KZd3PsI7H-Hv3nmlgYiN-yEhY0LC_Z4hp0jVqc64lBgi-Oao +r8pxSoEIlWP9t3049IVylV-dLcGh11Mihf5YBF_1e-Z8x_2eLTn19GONNDcZaa_fIxMkRMAWngwIHq_o +R6RNlkyKEoT2bG_o7UxLd1Iiadn-Jf-j3-tNzk76vRHhyUW8L5jsXhEb_PclMh4Y_P3PNEXxUjttvpvp +vXRAM47HPts85tbxS1Vyj5K_FVfo83A0WwKiNgl6V_G_Kuwzkb4SW9DaSs5GsyZHIGQYGPd7-rVt3O8x +VWnPWGD2Wy1rT8PzGNjo1lbuFDAdiJb2h0tTJwrQORS6n_n6G95gVBCzQq3bRlsH4639sMr-o74RiO24 +FTvB2PSpFpBNfxt-9FyfhZRdjj2zlUQqGJOOIQO06S_HNRoI1AY8lwOx2c2-yHZMdot4bCsWOxCJ3SYC +e8PI640jrrtGWutba-cC8CzsxX9qF4B_W0-cXV7HnfXy38N6aUPZYOd3ajZJkYAtCZPgRK6dj-fTD0fn +v6N_Tn6XAQcLnM_LKE-K5Xy9TpPhyJOnB8pO1JSwhw-1zpPo3qn0JCGoS7GjlkXblvUaVy2b3uEkb924 +vlmXbdtel2nLllVUr8vmtqQpF7zkjZVmlHajRF69ekP_x6v6yPufp_nf_v7mH__3zT_-72DMUm_omaw9 +ZUY81bb4APVCXjE2SAJFeutXKnsEddnzjUKHOf55cvxPdUcZ6kAJ9RTZRvbnyDs-OaVPm537BxKY8A0m +B6LFyMIVxMwNCmznvT0uf6G7ovw8RtdFSVCP7jBNOrau0nyB6hssk3BFFf3JQ63ShA5hMAWDc8l7wJC0 +24A98F7sdsJv20b-MtH3ld3WEFXgPq9JF47EhjncSx73oHlAA4eYngeZHkLlH2kxc4WtzXenQF4j5rio +u_HxYOx9CEvNTEAKjQ_IfdeKBiVH3ZDh0SW5wfFnnPgZ-OX5J8GWeNibii_lE2XMQc8U1WJlDHSbiLNn +1pf6XwqxdNyxbGixJQfrV4q-FCJDtyNdbGSKNfbIvgQRfsnfABKaezUDE9tqrNOPLpeNGjsAus6TdkOP 
+WiNE2JBGJUSiflWCfer5FDLW6BcTo9ImPDyQD9LeEltKP-kUbdlke2VFsWqxi6-LMtZjqfzb8TrKKmzu +R5UNDVpwtjkpyHjFcGj5pPCLJdNjEoREFaJZ1Lz6xEaFZTg15MjgFd1puIH-IGZiLaa9OCaH8WnRNEWp +8RXrF_gt9srhFn9FcV7vL6MvikEJBkT-4-oe-dyNlYzzulnzyQ0FXEfJs9bYF7oHZXSkF7sc0jG26wuk +WPBg7T8E1kCZ9JnRuplz7mJyaS-VPDfYnNB_0hxueF_jHS896nk3Ew_vaj5XxOXbbPT_26rPHerra-mU +uCqZQOHdoXLJVYIp0cfQnzRRlOcraXJAb5z6EeuZOHBNMU9YeSPSmZJzxdMFzE7XPTKasHp5Twjaam8P +pfWgQsVnlF6zP-lhSa5PNPN6iav1EqOrqMIJKnI5rAmxZlcMXVBN-2PwoGa2PipqNZ-RzddIE-ENaR8k +ghm5qHQU0gGVZqNgo6T5DS5T5qhKXomdqQYRm1P1PtSdWbmjXbRkz8kfX78O0DnvP2AZXXUfU8O31HTU +CEzDSPAvQaFYN5JVW3HgchSqk52v8_SPNZYB3uSZLabbOjUjWYylD5AJcI1GIo2t_HZwZnpqD8f7qcW0 +GzOI-D9nLaJGKg9J3D3WvxNeiIO-Q2_krz5NXmD49JDF2HcSBVtrRf6fkxdtb2tvqCvzfhdK04HoQGta +NzZdPauAQSuCAjkKgVl1TvLCzbT2km5BBdecs4F_25MBgeZaYJNThYjaD0qv5RwUO93DKprnxTxZrzJy +ZcTcc8ybk0ZIxeKuPQIX1BlVkX7zyHqu5najq1VoHl1nQt7R6xtc4nkc5fMiz-7nV3hOmAdVpGreHkNI +i6oTvv68k2a1zcC6ttWraQ0NrA_QFmqlwoXL6-i70e8HwTkNjJGDLttUZ8qOZ7SzQbxU2bI93Rsw0Txs +Q3fbFU2bL8wHXDIOUZZFiQ-mKkU9QfucocOiJXQ31sFuSYHjmuGDSl3HMhyU2csixlU1l3VjN9S-FstV +huuQ_pUaR8MKVgPbkPbD4wrgTMICytKKSDWH7XBQ-XwNqn1eopj9vDz_NFFXUaCscdWnPs8B7TH1glb- +AdzzglyOCNtHJY6LMqFDmznnqbGJbKpbzNw2qjGqC7QqcYXLW4yKLEFX-Ca6TYtS0_zKCegbpT87P5mc +kxu_en0yuTjuz87Ipn53Oj2-RCcz0v7n6dn7pguPs5TuTntIRsL2aQbh2mu7ChS7ChTPuwKFo8BpCAy3 +orfggPBwpHeXKG844GSz6O6NI7vhqO7miG4tmtszjwfGbpuxxb6I4odEa5tgbhKf_QAYwxHZG0RjPwhf +4fhrFnvdIuIaDOoz8-q7UdGjcd-jjZdDERzAcdBMX0_fO1HQhv5J7vayYG-pQAFrNckPwo7sz0rdiane +t5uRGQXDm6HZ6ZOiD1QYqvYidkJwW0xSxodQGWnlxHaoKT0EJ_SWYEGODilPciJjYhFKYxr8AJW6tege +bTV5pUJEDM2z5SIAhNPHymgX65EmlIb14Fb0Eg1LFRlbOmG2gl32PYuSMCkWjGhdEeTGTkhO4paHoALY +5fmns2NdZ-jViht3MSHYi0sSeKvy2Cx4jYnQvSpLsbIesGcJjpIszbH3GqTZqzh0OPE21ushTs9OJr9Z +M5oXWYKres4KxhJytSZsfefo4phK7hfo3fT84tJxJBLdNd3gZnae54o2pa71YU6bejPygKK4yNTAtoQ4 +rJ3eZDjH2mr3PWQPmIKFbni2j-ubsqhrqVTnDts_qKKzTfc4kwYPoFc-K4mGQ109ZkLe2rLidLN5hXGd +zIqF4YI-ZyVzE_9F0tdjnuYVLuutXiUbvyWvkmI-barhTN_R65JTVxjwyHdLDSPwTsp0MJorzi3O6zFa 
+4qqKFniEfjk6_TQxAg-1f-zuNobhICjgow2OtTB4_Z9W_ITLlb7Syd98iqxmQWiS57wFiio0-PpVrRSL +AWoxeTfj1GPNxqRK_5wuGKbXKyLsJ-i6LJZkbmqJVHxTXWw0a6uANQI0AGgrtXGaGId27Rd1rMNXfnZB +06h1eqHSOlDerJAx8igJuvIk27Qo7QrbOchMs6Y9ZlrNuSoyrPX1iSmaVZppKW0LtkdMaWfN_pZyioTQ +K6doc2iWU1qd13JEzxp2PJeBpdf7yrUPyPBW4X5C29X66n9xXM-pGuPoAk3OPn3gqz9YV7jk4TIDcjch +AxLZ5jO-H_RBcY0yP5sqq_UVGz7wadd5Yn3VLVzG9B2yRrKghwa1mnQbP86iqkqvU1x6i9Q4_Za4jtB_ +X8zOQMudQue8yLHAlR3AQ74z1FHlmN3gyTtW51G_lfTdIs7CFpXZf6jArPU-RC-kceiFXrCanp5ZsdBs +JVmxMLICcc9Y6StP3r-g5-ELKo-wrwwCE9K9UzSgAOeUQEvD6mZXd1LHBu2VFoEyTW7bsMAb2HWGHxTf +dmPwjRHgYL90KQZup8jefU_Iu91tROFVYrGBUbUqeNkGv5sJ-dsT-ZR8zonWL6CzBug2jd5IMa5YlzEG +xLjRY4prXmlNYNcxz3Q3zQS3hHm6obyoyyiv2L1bEhxAQRSVlJvR9aCXtdklmvw2vbi8QIMo_pwXdxlO +FpR5bDwOtxxyifwhI2neKevVqqRCwUPG04xcJf5jjasa1j00ig_1XZqlxbxaVuKmpySH3mCdE0TmgzF1 +Lc5RHFUYsS4ozau0qiuU1igpcJUPaoS_pFXNKrvfYRRVn_u9QRTHeEWwN-73Bn-s8Zr_WbFgcPF3Tf8o +cYzTW_GY_eLtE5yltzQmfkwBM39fR2lGLr2jA9s5m08QL0oqyKl5Dvs9Mfk0YWe9JhOP-z0qwvHGHlyp +I6YnrPydfTzYdxiNbdCbisBcn24ILM1u1D7UHNis25m47SYWIq3bIo3xIxFXmqd1GgHUVab5QpBRmu-t +yoJOkv6Wviv019W6utdJiPyZF3tRXt1Rkbk3iKM8xq2oy5jqxvRljPKcKUzOgXCgPMZa4F1X4tPn7CM_ +s81GZwc7ZgNsdRmlWTtOejo7_qeUAlvI2wlO1ivqFcijDVwttGpyyG7ZRR5HtRI1BtG6Lt68fjNQZkCc +x0WCh0m6wFU95B1eaGlZXozGaFDdRP94_bcB-fMGfxkob702UrWCynKzN27nafJlzlryu4IUFNBQjmAT +RYO0d53PcV6tS17UfC7HgaW9v_ylryQ8rr5VwE8vZMbOvi6zaUhHhqhlYz-wCsHVIB9ptyKWzKer63S5 +jznpPFDWq8uFB7nop8m72fnEFfh0YQ_2wfGtVoAXNIMTclYLfLDFbUV2oEVWk7JYsWQSCV7hPInyGqV5 +gr94w6slc8nSZQrrXdxEQfMVLue6OsZyKXXe80g69oJJpNor4QRMXpJm4pVusacvhS1INFDACG2URz_E +G7EgUNFmnUfxZ3LRY7ZWoEUdlQtcW1_oq3RWbGXiIr9OFwyBmjewjVXj4CRtltEXOBRu73Xz0WN81Dlt +1Gf96jfF89y8SEylNTvzZooSjtWbcEMWBO5-lAKtsUXlNEkuwHrV3WX0RQs2f3OI9l4zFnMbZeaLVwd9 +pwQvvSmrIUSSSqr1MdCqefpQVU9wJ3CPxOk7bWha4N_g1oCzIuWPDnxqJsezT2eXw_8cKSA9i6KBq9LS +Ec6tUok6EP5AQTw6O9E--FZ7bwJ_NL2YoAFFDsJfYowTnAzQp4vp2Xs0OT8_np1MDgfUlX9-mxYZ36ua +HvEwhEKG9cEY_UxbLqMvh4OvXyUsAMKCJwo_UFqcJ5qe0-TlQUpFoGIhtDZtDp-GzdH2FApD7ocTPJ8a 
+YaJ9GvlJG1ZkOdibfMgJXHgwE7I-9_w5kPegfWr2YwHSnvc8EZ_xIerpmUxLLgORYoDFQPhvz19guu_G +XECAPeAF2IoPFIunQIyhDUMRORxkgMHszJ-nB8wrszFzEZ9-_lwFEs-fkKGEso4EK_ITBgOmxXhqbgMg +8NkyGoMsYQ4TWpD2TMai_27cxYTSD1OAszjft1iKzRpashPuUclYBgiYCCb2B80-nKlwKL4L3uLc778p +b9GDtH3cRSWLh1kMf_8t-IyNzGfPZ3RKbctu9DXqznDMvbEZ3zGgDsHYggHZ8Lh8SOcpbdiQHi8iqxzN +fMEuQz2zxMacBwxWef78x69EfEIuBK6LBqoZJkN4jvbkybmMF2XPltf4aRPmOP71aM9tQvuhG88JQO-D +NcB1wnBZvAdiJG0YENXqCJU4zVXgT7G1OcsxBv0OeA1gj3hCJgN45PIYHyPtFnXVMlK0PS13cZH0bNkK +QH8wPwFQ356RgFTejYNAgDpgBXiGBwSLWRibvg2XsB03uXmNgOb6N6sUrZszDPuDz59lBA2VT8g8bDh0 +9qHX_qLcQz54cuYRQtezZSMgVcKMBFyG9qzEQ__dmAkMLgBcgKF4AbFYiodDtGEuhvleZy3CZecFc7N6 +MUbbYC3crfXZM5SAV8MTshPNW6WRiVDzs1gss3TUk_MXP_aeLXfRCBPmKR5f9zAnMci9G__QAQo7PoGf +sjgEuM39_IFnZGX9gQRTNHcrpNChZSCy4g6XwxekzYuR8vMxBhVZ6uVIMm19u-4y9E_210L9WgxgZt3V +b4gtB3DOUJrOVtuPoHhmjBxiqRp1sO9RyvMthZn5X39k4InnLzGmzp41zabNUcL8vNreZ52U0WO9mtYW +brscnOd_1oD-cU94yijHVrtKvrYgznLJTr5bc8ubM21LKyvUy9WTn1QQ7p_tGQWS9mNepc0ttOFl2gB2 +o-u0DYZ1tll8B2BV1EPfzrjSkb_wCpQ8ZK1q4XjO6HPy2_HkIx1kwLvS-nWyoGWRo_omrVBNfg429ene +20OrkkWK5fgOVfX6-prljcB5jUtRK4_GxUAO4GJ2RSZkBI6uNF_0HTdwE5V9kJgcfPFIoRIvi1uM1nmc +RekSJwxWI7G0NbwKGlbJBrgPP09SLBMOeB6LtHLtKDiEC3f2AAFDUw_HqARI8WTW_0tSuCdVXaa8hN6b +Q_RfrxhXirUTivzOnROLPvYjm3Nsibm0ytdZhorSQGYlQFTJgOkktTPEPWasVaV8niYn5h1PZ7OPkrdP +fpteslQfOTpEr9DsHMXoUE1cy1z6mJMBJjRG8cvX5uEHzEzNboxibbCPk_N3s_MPaLWYVxnGq-FrLcSD +YOCg3-9N36Ec_UCmTc-7HshMrot1nogkxfTL6H-Lq2pw0O-Js2lydsJoJ8g3usaJFut6UaT5Ys4joYFw +UUTjDmRYJ_1ZmT9ZhN6ccIMaZ_cs9O-6KNG6wiy0r6Ix0lfrGt1hdBPdYpFlq76JahQh1hVV91WNl-gm +oiGCbFj5xVp8TosRpREe7Oc9k8zKpQpx1uNHW0wdivRgPEN33xVQ3OJSufTyMWSUBI2t0zv1ncxzzvet +rOkyBqJD-nQ-lJbMwzNLM4nCY6ZLf0hZXiQSy-jhlF6SdVN6mFSrjRSEBgCC9xJ5HsMZRK7TMjxfccEL +x32KaqB1ed-ijGCOv9Rz1rjNlwla5i1T-zywLiodw85A2TGdv8j5bqeECRUbDVXvVhd7p-yP1Fx0qn_Z +ovRlqKgmHVW7MBTLOafdeV7MGUHlxZwmt7cSzOj744dDRfPkZBxKUuSnZZQnku7YIx40OgYgEGTS6fMV +-Pm8qLt_X6ZZUjhogiDN0VDtenVMiYNghIpSdgxih0McAI9r327jOXV8q2y4DF5MUAMcJRRLsoexUTSU 
+GQ002vU1AcmTN5YNWyy84A_zEv-RdFl4h8HoHx-5OcDIxU9hlKzx7Aw6H7XU-2yZ1LluDIRXoVHA-p4N +IwqsB4ZVC9M0WLwMjeMW1GgYjvLnwICq9oAcJhxI6YzhBFPCUkWLZp4wf3ar7_f8_HxsveTVMvRwYH_n +fs9O61en8edQgiwCX5TwciISlU4lESO5NSTQ9dJk3O_prGDc7wElU3pKTDJTSJi_2GtTGlEpJMb9nqrO +0hOH5rjfk6mkehqR9nueqqU8m3YvKxb7bKSoqocg-4q8MhmZKs1RUSygEjH0OVCEhn2Lpgyh9wbWXVTK +qckjdhVAOKuwdtrhPAlAwzNIBr474MRIM4UEv-_W_6GgUE6MczE3eyE4Jpeqbnkv2jdWw1skl15L3ao_ +WbForplD_kPzjlMYtBxtRso2WmyVthA14dUTrcr8j6ybgwAGBngziJf068t9-VH3rkG_JRGDDvmH-W82 +uMhUR61n-84MWCO9WDJVTsqCx3qxY1nCx13IQCmfPmEKd2l9ww8-whciusMrnOG4Rkla1Wke1_2evgnN +Fe8toy9D57Ojfo8q9-Al7v1vkeZe5BZ5GLn93t0NLrFJx_3eoizWK3R1rxWzUYD2R32eVKaJsvoVrjV- +eqhwQ-ta01nJR30Kicwtk9c6OVr0SSQaraGHNOBzTNMguRxZz_Atr3OHSO3-g9ZHipMWlODAPdn04bSE +S3GUZVdR_NlIXyGFMKjhfHVT5CIZz1zlIPLXmIRH-YzvIcS1gtNKgdkVTFkcVG8zRqoRXCPPOw9DNWIN +GhcJFpq_jkmFHPspUwxtIbtQt8Q8dD21zDzB1EIsOZiRwd5IYB9MZSRRLQdktTSvi3JerHAudLDsDjB0 +xh-hQy0fkLjaaXK-aW2nAfEivYtIoWfkFtKF6QfkGYozHJU8g0xBfhYV9pqPlbHHTiDE8I-2YBzskB3n +6HRyfnlxsOUMP32WclvLga_VORjB1p9AOh4AOAjnAjQnK3gjaFpucCIngLn7PWDDi9_kmuEhzoOGzEMt +d1DjLt5WeqqOqan6wrpi5tV6aC4xh9xB4gh5RHkX8ZvvpAflymo-hOArleTZcIJUPQ0-k-G6n0tUm6ud +DXSFjZGzYmEnH-D6T82XpZPFQKTutujNwQEMFoHHr1rmuX8D6lzrDCyWJgrF-O30eyb29cq7BrBns0t6 +YCOess8wDbGsHLRWjejD1YQP1MWj7ZXYNZHuHaQZ-9RysZXR6HCGSChVX2LSIyeNWLMQwSz_c2BRGkQJ +bhZXGiJwXR0Yxxy3Yx01jF-LNNX0GNRyhNNzUfUXSavZf1lXb4XabYg13RDKt9WGCOXnJ4RL8pP7LVwa +dCVqpCp3PUOPbRdT1RUjsiEbAb1lPbTRt5t5Sz-dOOVplQasWfeNGquzMz3bvEe0Yn5FduY2qlPzyFEh +-odFwEa4OQlsDP70DA3NdOCqgtLIM40Q1UEGCQ169if7tAGlk1K-uR6AOl51BULz-XrQTrVhnEqeQ6nV +KetCJ6ldMxUA2DxwxGkYk5AM15ZwzFU42GAgewRX8gzTi9O8YZc4lYo39Yg2JJMqWjIIW_lEz-OlUbHl +wE2s20LOgzLutnZd1kEQ6kbUMouaxZ4bNos2qTQh8pn-acj1-K4s8oUAqqPnsbnL2BDzZVTHNwM36e83 +OHHd9IPbo5ytL2_DaorxH3lJ4YyNvnXdquc46IEN73tGTNZ1Va--po6bDi7jPh5z0Ofu195UmdAUAsTX +CvzNc4cGyd46UH1pfcs-IxuPBYYrToQRpmy2b_1AWgUtGPphNn0nbvdNqGzMYAoP2URggoacI1IN04zp +QM82RLdpPRmzdJpPa3KDo7K-wlE9XxZ5WhflhjVX1DisurXlSZrmzHVA-Ije4Cirb-5V6m3xwHUVdSC0 
+fUWdRNo0Ds6q6oAAzzfopi3MqENpj1HfFwWwCbufnP9ydGqNL30VMIwT12nSQgsdQPZqVUfQsC04qIJC +-SB8bhLM51sdxxHGQkNDRUrtFoTcSnY3V3JqgNchNLVQmTxbfa1JzFpVu20WwOOar1DxO7uJOWX-ViuL +96AKbMbg32ainqk4rM7mdHqBgtbczQmAb83q1I3O3WVasgGIBLeRbkCN-_zDQJuxjp4oINSFRAP2OeUx +CaPs2cZxulshEMnpWYz2ormzB7rFcfqABUELRHMCYFg6Hy-HcM8-1_RhhNbEReLExjjyTju7iJJoLCn9 +NsrWoLxEPo7SvLYe4y-rtMThMBJb8MqTeV2AlXLta4UCk4k2ykQAgT1yCv16L9qOFyyuaobsYNRIV6sc +vIa-OsHAancw1nHoc4yTShnpHNscGKxFrpp-WMXZ3EqjC6Lb9RAz8N1ZDeuFFdDJukiFlJA0hDdK_npX +pjVGWRF_ppHFNHwZVQUq8hijdU5e4ASlNbpLswxdYXRV1DfoOv2Cqa-eOIBpAMYqi2KM6gIxUzi6Xtfk +P3FBa__hPL7fZ0oEb_pqAta79AuK8ntWHjDNFyhJq7jEK5zHKa6EE4UvbT35f-paQZPN8vp1hygulqt1 +jZN97XGfp03ocbdncaQJWRDRJBt3vPVwhGa_TM7R8OPR-eWUssWffocDLqSfqfaxEdpDr4mYogPQC2Z8 +H0motdhp8oopHOSUNGsVfavP_Ad46htYQmOZwZc7-EGlen1uVaFVR9MzNPnt-PTTxfSXCfowO5mYcg49 +5YVgQ4Ue5gqk0IgeO3f-06psExyXALITnOG6jQ_bRsgObytpWjV3lf5rD70GbKwefM9OT0B8G2ZXffS3 +3B1Mp2BtZWanJ0-xMnYua744eTFfRKuq9bVkVVT2pYQO57-VcAU4iLIfAuiERPXluqJBbii-ifIFhpeo +uFb8l2ep991DltGXoc7oxsCO5bMea_N8_F3L71gE228PdQwHsUJD1BfRihxo2rSqx1D77-2Jo5IpPyuU +FPmgRnG0rigQVdNVBCRE7QLCtrVwAwlheno2vZwenZ7SSPPJ-fnkxLmgMAS0STpvbwzb4pvmhMXB0Che +11zpQoxX4doznDqmOtTN8A_nqrTbTcXvL-E_V10LSZt1bLdg4fMchjV4LunAMuF9e8A2HIgtL8IgaT6s +mApA5h0OlBCV2SdJ32Yrff3UNyQlQ-iE5KUg192U4wKscEvnbtsN1g1lDfJOUNbZe90PYsl3HPepPrhB +qgEkmg6-Yy0ZKzsc-CU7UASsQT3VnvdZYLbi144r9oZQtuDmrdlc-yOpibt34zyNLBDwSmrg8M9LJeAp +7NBeIQBXeyE_6GZeFVVKXusXZ_FsG4oAvWiJijPlH5CXf_nFXqASCnjvJy389376Vk7xB2COB539zggB +lUVNYZuLYbrf-uF12ejOL7GHtllK5htc7kG0drvfd0NraI_Iu722ReSf8K3eRCs5w_Qn-h1eDsSOOo0a +v93t3cX-Fi7wdMDGC7yOph9czLW5ruvt9Vu6tqQNl3XJlYI3dTWh7W4381auoa31rVz7uKSvZ3o9b6I4 +703dj-mt3NMDW-AA9kZsOA38d9V21aXCN9XgMRS4rAb4rP---lCAmzl854JY8FptWA8rsPCwLByYSxMI +HSioy2xa0AQsQXdZFplxWMbvBgPpuVfRchmV99SePLZfGjkEVerAPhQZzJTCfLRDVK2ylEE-1KJ7xyi- +KYevX43G6DU3G6vEFso9SeW0qepyVVTgECP0Fr2iaW76vR77-FVVl2bbUP-Xr1VgMc4qLH-Qf4OBesdT +llGfpzYhxgILeoDx2G0mZq43s92qYBlhdo7OJx9Pj45NcYFXveBhvdqcW-SM5se91gkMnrDbHJIBPwzJ 
+SXI6PXsvcMsOXxb5x3Dx9at89_UrH4ZOfwQcgxwY0dcHCEhtFoQGxR1Y83ApD6Q-35AaBSLrH6dFp2fT +iDpNgnRp0aZOn4iF5yIrhCMoU7TK8Q0cVq9eAWT2gFD0AOmCZ4_J5hzODYMXit0PAxDOp6Bv-rHzgpPZ +Y_hLUu-J4haXZZoAlQvb-4UrXww5WsiLiRsAybFUp0t_TlbbEylPOrVn_jGy11vtkyOaTZl60lD_cbg9 +zYo7Emldk8SIC-oWic6yxz9gAAbc8ezodHJxPBlq0IytwUfSnUfvqYP_w6Hdhc-xXqhyBGG_eVW0BoR5 +g9BzIbBJMkoTTGS1uMivszRu5wMrGjPI34h0No63a5qwy59or258JhUDqgCGPx5rp9_8beSxRvZTvYeV +DlVboTRnIcDmMvHYd9VupOdcAMis7TCK_5vwqQ0zlruPKQNPjz5eyPw1sg21loh2THUz_TC9RFRlII5m +uUbCaRy6B4sVkK27VvLSl1Eno3mUZcUdjaJm7rUiU8D07P18enI4-PpVNH3zpsZfvqG7rW8nQP62TiCZ +ScLtvW792w8KHLdOEL2Ekn0cWBtBI6iHswoKrFkG51m7zDccvOiJ_OUbeV13lmafnk9fws-P2mfrVx-i +Y9i5fjtbHNw2PHA7HAdmfh9KKdCGnzR38kAYFkRl-yKnSRTnXLpiYuc2xJtxZwkWqrTQKM9uryaBJdrK +ckPIIx1zIwpzyTe4tpQh1DnfGDUIL0hDZKBUjdEwQv1aJPeS2QQufZ8m82uZNdRyqO_QX_IockOdvj9j +OZc8PUYam5arBNuvhnprtXrnk4vL8-nxpa728XzMzkO3uf6nwrRQHsfGqqAeDSxidc5sMS0UQfzskWaw +B9gyXZuKOfM2TLptEbItYoyqW0tclImRccKHMNtSqcjdMlE6SLRMUBZZiDvK1rAEbU5HH6wDEark8hA9 +JXXCUXY0SqVJ0h3VmiHYgfoQePZSNw0D1PkcKJIq_U3cJDh7fNzsubixbL1tLOCPvFvpASdBKvL5dVpW +3D4SoiD9yqC6-LLD8DwwlPGp1luyKdMIa40pvDpwU--ZrETTMhhFaz18Y4yqm_S6ZshidguZis9QfxuD +qXmOmYTDe26eie9xSSFKbqM8xvOinBM5hvBwwblD20UnhBzf6WRANTjsrHSIQ6cOD9oVyWiDADSjTgeI +WOAtN31nQMbdMBzvC21j-m6UOpxi_q0JGyLDBmca5TTi87KZnqHhq7HhasNzQNouaaziAdJ1UgI7cikh +S5GeOAdYhtZLQXF6eqHQ4D_4Bev1H-0G8ruBANxvH8CM-0FnRnqd0TxejgQTsTcDC_jVXHMaCOr9-ezT +R8sLsT-yPSOZV7PvAGM_9pnHD6uHqTyAGD61ZiZq5ReYgBXivp2ZrqhKY2KH5kilnDWAGg61djwYJT50 +WEYmPwZsdM3Xgb6T6twiZk9iSx9d2w5i9q0K1Gq0ubV0mKAuvQfmZ6DdM03dPdVkUO0nYl4mvCE1gDzs +Vj9uAbzPIQmUtz2OU01iVt9WZm0KV7M854Hw9SuPnNw3_KMeAptPDicQbdPpCJre3141O4Ntd6JNXkYA +kH9_1SCEbQ_EJmGvMesBcESHVTJGNRM4-dSfV3elJbh3KL2ZWbunCOBZ18gqO7rpAeytywhNjKjLWF7e +FGICzigtWEBHpvLMOIoz40Z-0oi_rqpQwH7R6giHnDs95yvs0wkeKy4sLU5H142pmVX61YC615IjdPuN +Nnotmjq9Zik1aY1IAjc1ThS5N2epljBIlpUEi4c6NUbY11jZwCIHy5PyspKjtpXSKO8bXqdZdh3FdVGi +Q_Rf_7B6u99xO_1__2hZuwfGl5OhKjj51pM7n1iQNs4M6uGQwcP0wCLibzNVMFC4Wio-tfwJRjIFjxp4 
+02wqT6APdnDUSSW8CY486uBmHD1EFaHTog9o3XXdzEAC2ig6Z5ahGg86aOVoO-BizK7aI5Q_QCo-PAVt +A2vmrBcDU1eAsCcizzN77Qn5Br7AdCGeC5d3s7r3VN_8W95UYY4ASzVC2oJ2iCvRbA5YYBvaJltPcEwI +heT4ah-jHZpzcKSGIO1GiSS8Om12r-EwLUexMcgPlRMaukbd_ZjTNWIaOX7PKEokDk2ZAhlNPvLXY1rb +XWtfkSHvbtKMP0jzBcK3uLyvb9J8wWO6RTrpMYhEMhjEM8jIH6LPWJWrqm8wWpUpjU4gF0BU4j_WaYmD +SLrXr6cnJ2Z1KFkUygnSsdNkA9mxg14w9sXWA9ZcQABkJmAVUzXeoSQX32iypzehCVW--urfWxl6L3Ct +4yEusjErgM0dkuThsB9Ko0GQT5msnmnWqMdMmayo78zZLC_xLNDTsBPuHR2E9rWteoLQjUrz09_GZIWg +OPgGJxANNJWWRWFDHnhaSARY_mSLghR8BgTn2-6ACqi7dTRcsBzYXgVwAOsPWlFWW1KOXuT0A4SDtnXs +0e1gIar0ZFt-9DVsmKGryxfcDVo2b365H_zCfrA0awDzW15XtmXs8r_bWVWrdtw3WFNtdn09jDxcZ1er +rMsX8EF1dr3obse8N7_wN5-GB60AAO_lkKAYnK8TENdtR-p7EB4gyBADjNin0QsyV0853uDGbdFnA_ro +JFat7JL4llCn5D3uzatV8YOYmC8m_IlUz9vV625qHnPShcEGkGaX-za67E2MY1sDsIXC1xc--11SxIb0 +0M648S0JYmsQdrSWSgbRXgGxeb65zjk6W6b966rBaZeS8_mi5bE0WxBeOp0mO5fOnUsn4NIp0lrsnDqf +xqnz0c_c53WedZVwdlxqx6V2juc7HvWEl7BQHbESLxiiRYyzqojqFA-jVoLz6dEpVwf0-5aIRtmfFrur +BndFuQdqLfMV9yyZSzdGJyVsG72l32UFVl0S8oQNCw-gR4CAGmbZ6nLimBUCaskAPqHribOBGkYJKtw8 +mrAwSO0vkm4h2U70VmJD_VfknSwf9P9grb-ZMf_VgWyaFYVytNCe83IKciB0iN4dnV5MtJ5RVZsN9EQa +Vv6iAFS6ga2x4Ixk59aEUKQlh9usKE3nndpcqPNRzA0BGnEsDR0NgCDifnzrdYnyGB-CVGyplqVxC7mu +41yNvc7TP4RSOKQO7uJz4H7F74Cu6ajhb0Oe6LpLQlM3NyuZ9oYvJn_TGALQRVWvzfzAvyieXmRVwCCR +wOp348xNZBTMrRKt6xuWnqXCVZUWeWO5UuWWu1mWFPqxaIHz2ixaqiWUBfKktEyP4tafpxPkZa_F3FZl +cZsmbHSo0DwvvQ-_fWD6FjKE4cmjATPWPj3qj5jzoWVYfDUSbox0Zou0vuF1u6sxe3QVVWksU7LooYwW +LjxfljlhZODigH1lMEb8c1oj7gNjw7KtL9PJDNgLIgZD3zUn_BD_44oIkWq4BlmCX2EfMEsygAjGl1Ml +J4yarvgl2xsBoD7iQSczPR87uQMKuj3Uh_z2Ds0c6RrtPC3WJWkTrEjyZqkCvxvsg0GkNl6dOE1n07by +yHWXyxfFau0m-Pva5m3_eX2LtstF78INYgD0cgVn7PuCAg2aon98a0oOgetQoqHLhyXLBEhe45Vin4jQ +cu0kPJR83gwyl9_S6dqkafwlztYJTjTC1lQOxocPnDeG9NEkr3zG92WaL5SQAqtFjBLSn_F9hX76_XJy +ZB_06SJP8wVpAL7P8Zc6_FJoiBpSvklFErtB_jR9r0coNCdc47NuJczltMrVowlxzcAqKBwNk5tf-jq6 +Lcq0lvml2xfDB4QrdrOn6SwtJ2gwKSCX3kOSGrtOaUFp5uCjJm2bOUdIRSJQwhJ9L9Icz1dlEZPtkC94 
+vu8LNDn79IFjaKBuBlxQHtzgqKyvcFSLB0JNI34L-hO_ZXJS8Zu6y81ZkRjxkBV7F79E4XlXwnahvsWl +fpUgkyAr4ZketHkrnmrOJcLXfZs9hr7PPz3qs_OeDj_UMcgvJUMNh_KRxKJ8IvEon0hMqicGLuVjjk35 +W-CzcTv5pydYqZ90AIYh_N29o1LW_oL_eoEO0d_4WSGW8dAgwYMtDSmRDeKj-ydebx9qYEgf1ATTQD0c +WkyAqaoTnKxX9HCZnaHV-ipL431vNQK4p0f26Zp7GR66fYUQMQ9_fRDR4hDJF4gld4-j2kyaTnlNtK6L +N6_fDMbOK5zHRYKHSbrAVT3kA5CPvNBKNLwYjdGguon-8fpvA_LnDf4yMMtoPFVZDBi5bgCCsf5meQym +dRzm-E44Nf_4Vno1v3lDa0Ow8dnrkUfZ2JKEPq06VM9oItsHF9EwVrVRpwdXNDLGQCwDvFuuyGoWrGMD +17BR9WtaFQUyv2dF-uxK_uxK_kiW9z2V_OnC2jzJfVpX_vm0soM0WV9vsCZ0a_8mWvgnjvRqCHFETEW1 +qVlwHOjIDZfoULs3k3-mNVV77thKzX70zh14D1hjr6Oswo0wKgtpyHDJTbBbU9s9OMhms8g52_rJt2go +fm5YZInX7ElkE9By94AIOo8H34P2-tNg14lLfO7ofaZxp8F9uLUtuPNB2fmg7HxQ2jsPaIUWQ7y3W8oD +fiWyqRTIutjcnusJbR2hL6GEip5sdl_h-yTaJ030PBBjVN3GINatDBH_PZuqA4Z0ogfObbzvppagTYG8 +KQivKLWsWB_fh2kkNF7tawmC3qJXMkWFCp4WqgTT5iPTi5hZhR9s5jUODspR2qSgcEy-W1lOjfVBS-rb +ZIoDymUlT6ClVRwQWmLDD927jn5eqLE3aEGfjWnfjZ5-HovOJImxdRwE9rJceH2Z1eJvfbNqx5egn6c7 +umh-quD6Ofb8wFWfnVuaElMjVEiJGbT--wnKmxQszH58M2kVRanlhTHlnPaTMDghGL_U8thtI1szirLP +zrTK11m2rZO7odI51NwwMntubw3r30Yl3YaEWn5aW7VmmmkHik0IlvTVzXOXotrWmPis_XYx0FASLmvM +saN2ac7A3cL9FgI_ZEBn4_LAZahS4BYcN8XwoVF82SW_aeVB3WrPfRq0LDQm0vr0xALxqIoFSkSMOEPR +ahFCVgzXrA19YZvZZHnACicgtdGj-HMbyaOd0hTQSBI-qotmT5li6W-vQtP2Kf7aHthFlohfUZ7oPw_R +gAWGdjjMg-vTzqkvPN1gff8uX7c4b4PCwY4D8DcX5bpVKkWWNTHMOhs-b0VtdPy66_Akb4JhpMAlQUGE +wE31CsQSNMFUOGJ8DMwZschjTNiYr3IsVAEVYGQ-PmZl8QI_f9AWPXaYzWbYaVnbdZsMNiviz1T7W-Rz +WwJow2H39hAZQuZqpensWXzE5Pzd7PwDzzkNX7mCFyPWTxZR__ZX4bpMF3yjbYgt02NBHD8vGIt9QTgw +L4uOk4GGoMfLvOk5f0JU4bWNdNB9kiPJUXjzw8l5To4pqIPnSApTtDeqPLy4vlP3mcy5iTJbn8Qh5PnS +UMPVYBpR2jxcl70HdGkihCaPqSePvN7EC3AXy98hlr_Lpt1KSH9T-DysBsKrahzAueb26rncq6zX7s3G +Vtu8RHFU1UP6f-TT-wnOonsUVdTLbvT16wAt03xd42pAHqZ5jcvbKOMVD333ZPL_XBSzQWBKKsrh6PeY +UpVA7DdYcpXBqsR7q2K1zsgkixzRi-5dWt8gLmcRCJmeoMIZjmuUpFWd5nFtmDHJJ_nAyqckLqIMVzEe +RnG9L2WuVVRqv6hcJkPJZGAOdVopi2UYG6TV_xYpxPTItavIKxTFNZkWgSDkhsPnQEfM8HXNhrVKXJZ0 
+95Dh2F9WrUzyDb1Mp2csI2cKTYpT5AwprOKmNTSQxUgN6pHj6WMyLMOuJvtyMPUnlGpoOynhM3qi37q7 +wSXeZCXzoqaDjMY0l45DTVxBmOZ14dOqKGWKFaTIKdF-qahGaGzIA0LURX6dpYR2CwLXTZov6KsS1-sy +pxUO3E_1R30dxkC9IM3QIsGUIJLFVO-LnMAlsNT3krhiOXSdhUWNkrK-veUJQVYx0tQOmtBLR9DXgAFB +a0ExcDT1XSPf6OMvMV7VYoLO1NmcgqzWH8bgOUX8CqXHCuQQUSiW4t-p_GSrlWXc4LJaiDh5pxKFbFTh +P_wleXyXerfGlf3R2RlU5Mtq5ZWOTos7XMpYI3r5ZC6_rCXZLUlxl49RXaCqLlZIoXg7sSOdcG_UYjNn +6GaL1hHv8-9XzunzqKrSRU6kpQqIZjMjDO0Olm59vU4Ty_Ijt-5Vukjz2ox61Cc-Ozv93f0ApCNxGtGs +2WZaBVCBta3PSZXZNfmukQVFeU5oNgHG12ALwBbBEoohFyp5XLWLpGxBNAAPy6NVdVO0JaLAAJyofJQj +bCbK2kpEywUuoRbL6EvoNRNZhWhqtSjxCke1p7tO8nxATdogT0MUHpi9f60DnZ6IJLcOtq7XdCHX3hrA +i-ffHv4tbjn_jjOkjKxYNO4wp4OKB6ccWpy_C5zPyyhPiuWcPB-OWvJuHkRe5HUU1_Mlrm-KxMP--eFI +Be6qjpYrpP6i9y8qif-ryLFp2HS-tVyRe3KCrooiw1EuW1MH_4ZTxcFHo6mENAJOFc8xsuH4j71nNwTL +WVcAPqeNQ-fWMH6wA4QviEeY8RrI3mze6hSJsxSzXKiKjyc4SrI0x15KZc2oOoR_EifexjBFmqBC62K2 +AGhRF2wM3w2rZ5EluKpZtWBNlWUh69PF9Ow9uqpLjNHQmhpB1wV6Nz2_uPRowZO0iq7I_TwT9kQlFCNf +TJgFgdShXVweXU4-TM4uPSUX-LfYrbVyYkk3w--W92IzTctay63JWvbglK2rZQDO-61J2ywm7cG-bAQQ +uDY_L42r_g1krpDXgdK3NTdtJgB96fOEqvQ_hMqkpNQkMjgdhDbUEmqfGZFJcEMLIRsBRKaLmD4iU_0b +iEwhbxtE1nFujyRK-2msIiuviw_NrMzt8u0kUwpLVDdQqagZD43glK932sHE62IB9ARxWrn18slaCwgN +_x2o5RhpBfBNSge-lRK8fTECGJ21M6jcIxtvPNnHlo43BuyJ5eNHJh8XdqiZC77TapNjqr4pi7rOcNPp +JNoJmZ5avRB1NRSvWDKp5tOJMmM2ALMCdbyPwltaQAGthIQQkOFj4S-tR7qo2fL3fZmu0lhQJ3mUiVyA +yqhnQ6Ms4O24IbM2l2H7nPyFXMIX21lNLwKg5fU2VhzM5Rg6r1a2JZdrPCJ0rbUb28bGI_HzLYH5tNwd +ThrGeN5dmqXFHC9KKuctK-5PbaZ9IxyQNdRbACYvK9Mg1E_LNNgbrPPPeXGXD8Zobw-lOcuJw7qgNK_S +qq5QWqOkwFU-qBH-kgoD2h1GUfW53xtEcYxXNU4G435v8Mcar_mfFc6TNF-Iv2v6R4ljnN6Kx-wXb5_g +LL2lRt4xBcz8fR2lGU5YYkLr1PBgkMxPTB5MIdqjhwRv7MGVYkE9IUl2jt1g32GpAjfoTe8i3ARiZCrv +Q9U4vKR1W6QxbiAus00n-tK7bpvC0jyt0wggsTLNF4KW0nxvVRZ0rvS31CXTX1fr6l6nI_JnXuxFeXWH +S9Y-ymPcisSMqW5MZMYoz5nM5BzwH2ucx1g0mp5dOmliN3NZ-HvQbL6NPJFhp4jGLcTWKo6y7CqiklWT +IAt2E3UJbopcIpEmIGcyEG_mU8EUCfbYKPU8bWQ8qNgCT3VO22v0qh7qkiwIPSBJ6DMZa8O66YMBkdW4 
+Wfi9aVyRFehoSazO_F3RlUllro23bRJiUKhtKdbwaD5lb8zwLc689uuo5DmcvUJvKGjQloYA7LXyqwC3 +up0PerMk6w1-BG5e9E1XzU1CLLOeWxuHl1kbefyNdyUPZGzKn7zkQcBBhnykLj7jfE54czPzsju0vmY7 +VAv6YYmLJrQVOMf5skpLXLmspFjXIW4yZJfpl2jw6tWbV_948-rV4M0b6eft1PsxdhLENhwRsrpL6_im +uMUldxaNKiMXepqoJOZpPtfEPPpoXWEW45lcAYnLncHZoMVn9NNsdgokzV-XJRHBWGMYPpNFH_88Of7n +sPg8cpKWu98uPo_NLyitT12u8ZhPtjFfuD2yI9G7DcxVQHlRl1FeMd2Tlc3WLtmhux1O36HJb9OLywuU +JuO-5giaJuin6XtWp1K8UD421pC0YhaNsBelzJyUukYhiy1BIMZr_jzoxb0FMMBxW4DjcSbeAkDwyM0g +WZVoHw6JOWAzAJ5w24cD4onObQRIUx9IsXkL4EDDdgIGl2WxFcQ4Y7YG4_9n72p728aR8OfkV3AXPdjG +urcb4PaAPcMF0tgtfJfaC8c5XD8Jiq2mvtpSINtps4v97wcOKb4OKUqWt9q99EsRizMcPnwRORzNww4T +TRuiai03RafpaMAKXWGgAexmArbVm-y-KSt0rbgp5gulyiuAuOwcuBftQBn8e5lAWcfXJmHSxhdRYUKO +r5TChLGpXF2UDfhqcupUCZQ0qHsqCOmj0SNZsinxHL7gzKQzUEnVLLUlrXugbwYnb2DvZhWTe3Exz-yd +YfaAMUXuIUxNkncbweXZZwfD5P5LxKqbTPlHNcstXH0tiztMKruK9zH5581s-lpzS2JnziJWxJOdlrx4 +cT4aX11f8ryty0Pu3ef-Y8j3owPlw1-eWA82uFKBtqmFciyjg7UNJUVe3MkbRfybob6110kA1IMkQUgE +uou30exnMiQdduDv9HR5dU-udnr20Ff6r897q8_6ps87RCbqL3bqUFufLN5GMKqi6eW7cZ99Jju7Hv11 +vaIHGNrlPapqvYo4ON1eHz59Xm7jLwpHgfrpMzFp_48yvS_GUKVGsBRGnkbQAsvtGj6biv67y9I7KtOz +24T3WPEQEq_4HANXs3fvJouauTr4CqF9ti-XiF9-iX76Sa5RxbQBcIuUQjwK33BCeCQNT4TMB6F-ZC4l +dQ4Y2y1hzujfCxAUDC8Q9UBoJQDmQdWAwqJ6xEFxawmDx0ea2SKggAAQg4gzA7rBMSXDYQHJtgIiHAwI +JpIc0w0LIh-OjPCWtBQcjQMUQ8ggCXXD5NIUjpWmoYWALbP0w_o-2qy3a2s0ac8cMLnkgxDShFsIDu5E +00FypJ5EwSrTFwQa7jFsIXh2BmsTOruECzivrjDYbBV_ANBETpYS6ES5QAAxvbVgFIr-EGAyj28ZlPwE +GQikqbMmjHbqp_aCmDyUj0eeqDoYRF1nbRDNjOrtAFHwAUfbLF3vM-stYhdwQOfVFISaraGFgEGQSs76 +9lPyZMJlPnaA5dESBJUp30KgOJm7gY-geEdhsWWC0OBiLQTBk5dYx8WXYQmFKkhzEHqe3IXtA9ROl6Tj +aD13wOfTE4Sanb-ofWDlyT373tZEST5wwINKBuEiJdsICJa1z8QGK-OCqURfGGKYkjaDh-1V9YdlcNXa +mRrhFu0FyDWiykZR7ZHTxtHiCCHRkcELOWAq1RiEmSNmpsUAwkenLuDyg3uL4NRQDSiQbjFALmzKYKmN +SCvB4MQBJhYull-nVBgSGMlvO4BAY8V0ULAiDoBKtAWBhQbFtRo4Hk_mRI09L4fM0lMVLx4u11qwtPA7 +FC61hB8wh64qkGkxhu0DDftg2EANK-KArURbEG6YjrYCJ8MsEcjEQx9YmIZwmGTcaEsBMiJAEZT0Ej6o 
+nLrC8TLCXFsKmp3eBAPOLuUDz6szHEBbTVtBzB6TPF-vcOzEQx9kmIZwpIR0WwF6THLlm8vMgZRdygeZ +V2c4draaloKIQeYDqBYcJ288EnZ_fHxi_YC-ZiPhjg8Xayq-qvlQpKZidU4V1nKKiI9TB0ScMkbglFfn +zd8oN3vlesy95Omv85q-3zr-CuhUdybNXScc63c_nau6OR_usc7O4_yDp3GqNe1xOoVT5jQ-i-YO9Kc4 +-Z7yYNjceemUB4u6-20jqYuUtHfGpV_mmXkhrM8IBRYWu7RUaCAq2JKXH5PlJyyLgrsWZwZBl3qW-YF0 +u4Jr7hVLvdMr4dE-SF5o-NWocKcxq7NBjecrHI3fjOdzqjOkqY1UOZ0t3NUa7TTurAnCE268Bx1MgVB1 +H-ugCvKOfOx6STQ3u05zaCQXms7IJZsN4b3xfwHMfHyzmE-uFv4x4oq4RYfq7nFJT0kVx37tKkrGunGS +X-XZA1mnq-QLWX9giQx3QHJ32MQbmFLr1eB8mSdUmpVbZin_rHLzZBYlWYp-Td1lj-kS468RcuJUqFYp +76xbKVNugKT4DahdJFNzVa0yoZT4VMJ6ogJ6oe2sOUD42iuY7csAM8sDkyanw5G_HgOUbVHNphVcyMGD +wRKgjQvhKz2muYiVJakJSpDTmJ_CulMTET0aMWIvtZFBdX-8q1YxlDdr3R3u6APuqCib84q2dVqx3UwA +q9_wb1QwQiSjC7aiSKyGmFEkyzt6Thkjo0JfVgG-Ej51h3q2VY5d5UAbxYGy12Z2VYt4ezrbRo_0NBNW +c1HUUSt-UArr8AoAhTXG3C41ku-2cJV0BpxyXnfAkMmUjP9zdX17M_n3mLybQfZ1XrERC3czXrB8gsWp +Z8jSRqzoPnOfH9Jlt8Mo4Tt9Uaj3XeeCE8Ur2QOBRp6qsNIQ9pT0p0UtaAWyRO_cZ7JlbvH3y4qGcWjB +GLr9xRrLFN0nOfkL-ZEMyQW2ReWJVC44U767d919CfdVrMlfqVsrozeok78ZIVZWxrMjvyTs_TfxXbLZ +BeX32d9re6rb28lIJsNUyZS419CX3ZeeHpCkP4_x5oDmCSpYInQb-lRP79wi8qErBmuYavBsKlqrq0Gz +ScJ506NOc-Cwp2U5JI-85uUm6H6nomb0oteSCLrpZVJf4ar3iOabmZdrpyWOV49xukyiLAdnVp7tI36M +j1bJxpmiWM0alSafQQBmyEBQ9Twm6q9KTmO-zjn8Cjzt8WKmKpEJpBBPCVsKVF7EIaRVUn6RqaFUy75h +5dYrNLuUzL9kZpfaJZtkuScX5EOebeW9Cvn8MckTgtXfo3Vd6NXwAehqVXDLLBM5viqURRchOGqf2YhK +hQneysnldCRKrlcKoOqDh2y3pgL03dP9gaW8Kn777oKnm56PxnPy-r0sPBrfXMGj68m7yQLemIR3gBhw +kxtYi08Hq5p5iy8dDp30DeUY0WSo41-_Z9WRWS-BNpJF-Hnp-J2Wjj_1vHyelsdMy1oHPP4HGZK_eelM +SlMSE3FbACk5azPOGHqE74oltbTpMMwLAm3HrCYJlVQi2M5XaZOo0nEFb3kHl5skTg8PJNusCAt3VAcq +ihaDGvhXKBA8C2ZXGXLqNBcTW-GktmczYUfb_WEHmS-Xm2yXrDpkNtdKMKcGcH_wi0XyknQuyCp-Ug5b +IGI0b3V42KyXkJezSiM5nQBdH9QVbBt_gZ7bxl-KVdetiT59O5_d_gyriMYQQ21haUz5gEcV0KEu-nUI +QsGDml_m6MORKiyGsBxP_MSFDivGz-AcU3ar0BNW1Vntp0EKAkCdRHxa27NLMBGaE9gkslJAM06LJlbP +B5SveUAxdwPaHFZXKN1iNmR6z6eU51PKV94OPZ9Snh0cz0vH89Jx7NJBNx4WuxTEDoBbNU02jD9c8pNS 
+Izo315dX_0Koo3SuPKZAuUqACwKbP0pu2msdqMBAl-H6pYGLdMB5y7BN9jGBfO0WL5fFMIU23qKZsm1E +aAL9sVhFnkqib0YLnQXQymnS0S_lh0uFhSmO9nRxi5ZxGmXp5im6S6IsTdBQOmfpIgBUzIIusNsCe20R +S5Bm-_Sw2ZA9_fGCJJtdQn4gSboi3wkxKSXu5CpLqlOvmqSCtFOwB5dwVK7nhCjNInHulN1S3Gzhn9Mw +sgJhAD4qw8ePesyRWut1atGBkxs2Vy-nI6171N9V8OnvxfyezUmImqI4pgpRoxYPtej2-rokKNlO6XZ2 +iul4ph3Si2ZBxymMw1ohKxLZLq73bbblXy5s6TaQm8R5-s7Pzrrm3IzTFVKJ9lQ2f72jv_bOz86ynCpD +pg-ujwnCw8IAoctgGrVCWJHOMdc0tNF9Uc6aE3hHqM6S8n4wXCvWyl_9pv9HrxOkGXJpuYackF9ar6S6 +Sq-P13sPz8cNe0eb9bH9z2jEeGAMQqQO9x92BhqNpdtq0uXm9STxbKEDjzpQNrMetVbTpWEV2n5Qw68K +GtsSANgEMR1uCmeLsU3ZHe4iZQUw2YiD1kaYRTCDLF-38QE4r0X7ALyLmBBlD7vyd6kS46g5DjV9_peG +AYy5JinmZ2lS9MHAiIRQaH3N8EQZBF7CyhOkQwsPOXdz8dCpNyXAg5Q8JumejkCtJ3dJuu_Ae5aWoYDB +sBoCGW3e6Xk5hpHoduAa9r8B0CFYAnSxlaEjXI1itXcRaKAtWkyLB1ZLwEdPf4Z-rdd7viA0lgRAnhxd +EWh0M7eN6aJrUvzH-5i8fr8YXxoP6p82teMuRh1SMaqKSVWPqnJEppXkWsCSLGgRa8VPDWxJ_u7dknB9 +6EIvYirh4D1kR-9ol-y79Ic-6fz6Obn7mGWfbufXv3UUNjU4wff6ZJ8fikhYdqgfgqqXr17Bm2oy6ii2 +UcOYK8O3q_EbatYi7ev01ZbAfy-J-txlSdN7QJfDR37RudvEy0-MOlF31oiZoMw8izDd60iIlzB8gcZd +97CUe1QM29xjE980UJQ4-p1ff-vwPRL8wo4ROhEn3s_EvCUFeaq6WByo7r6nnLLd976vqtRvHOc8hbiV +Dd43gp1PxbqSJx_yZPcRvyagdr14Af9JOkz6bzpbTN68J99-f5_BS-F7pu0l1_atlxdxwPVWdHwi70ne +hsPDSnzdqC3cYpEGp7n6XgRTsJeeAx3PUHfZY63TAZ0wOP9fAAAA__9Go9o3A-EDAA== +` + dataRange := func(start, end int) func() []byte { + return func() []byte { + parseOnce.Do(func() { + dec := base64.NewDecoder( + base64.URLEncoding, + bytes.NewBufferString(strings.Replace(dataStr, "\n", "", -1)), + ) + r, err := gzip.NewReader(dec) + if err != nil { + panic(err) + } + defer r.Close() + + buf := new(bytes.Buffer) + buf.Grow(43242) + + _, err = io.Copy(buf, r) + if err != nil { + panic(err) + } + + data = buf.Bytes() + }) + + return data[start:end] + } + } + + Files = []File{ + {Data: dataRange(0, 6105), Name: "migrations/20170426134008-init.sql"}, + {Data: dataRange(6105, 8203), Name: "migrations/20170428154209-users-table.sql"}, + {Data: dataRange(8203, 10464), Name: 
"migrations/20170502172843-user-settings.sql"}, + {Data: dataRange(10464, 10807), Name: "migrations/20170503144542-remove-carrier.sql"}, + {Data: dataRange(10807, 10965), Name: "migrations/20170503144821-remove-email-verified.sql"}, + {Data: dataRange(10965, 11147), Name: "migrations/20170503154907-delay-minutes.sql"}, + {Data: dataRange(11147, 15527), Name: "migrations/20170509154250-alerts.sql"}, + {Data: dataRange(15527, 17044), Name: "migrations/20170515120511-escalation-policy-actions.sql"}, + {Data: dataRange(17044, 22368), Name: "migrations/20170515162554-user-notifications.sql"}, + {Data: dataRange(22368, 22644), Name: "migrations/20170518142432-alert-assignments.sql"}, + {Data: dataRange(22644, 34321), Name: "migrations/20170530135027-schedule-rotation.sql"}, + {Data: dataRange(34321, 34950), Name: "migrations/20170605131920-twilio-sms.sql"}, + {Data: dataRange(34950, 35586), Name: "migrations/20170605131942-twilio-voice.sql"}, + {Data: dataRange(35586, 35939), Name: "migrations/20170607103917-throttle.sql"}, + {Data: dataRange(35939, 37472), Name: "migrations/20170612101232-escalation-tweaks.sql"}, + {Data: dataRange(37472, 37779), Name: "migrations/20170613122551-auth-token.sql"}, + {Data: dataRange(37779, 38010), Name: "migrations/20170619123628-add-constraints.sql"}, + {Data: dataRange(38010, 38153), Name: "migrations/20170619164449-bobby-tables.sql"}, + {Data: dataRange(38153, 38590), Name: "migrations/20170620104459-contact-constraints.sql"}, + {Data: dataRange(38590, 41425), Name: "migrations/20170621141923-notification-query-fixes.sql"}, + {Data: dataRange(41425, 41699), Name: "migrations/20170621170744-add-country-code.sql"}, + {Data: dataRange(41699, 42497), Name: "migrations/20170623151348-on-call-alert-distinct.sql"}, + {Data: dataRange(42497, 42876), Name: "migrations/20170623155346-delete-keys-with-service.sql"}, + {Data: dataRange(42876, 42997), Name: "migrations/20170629104138-escalation-policy-tweak.sql"}, + {Data: dataRange(42997, 43546), 
Name: "migrations/20170630095448-integration-to-integration-keys.sql"}, + {Data: dataRange(43546, 45322), Name: "migrations/20170706102439-esc-zero-index.sql"}, + {Data: dataRange(45322, 45613), Name: "migrations/20170707135355-esc-cascade-steps-actions.sql"}, + {Data: dataRange(45613, 45729), Name: "migrations/20170707153545-limit-cm-per-interval.sql"}, + {Data: dataRange(45729, 46652), Name: "migrations/20170710155447-fix-escalations.sql"}, + {Data: dataRange(46652, 60985), Name: "migrations/20170712094434-notification-policy-updates.sql"}, + {Data: dataRange(60985, 62034), Name: "migrations/20170713113728-escalation-schema-hardening.sql"}, + {Data: dataRange(62034, 64019), Name: "migrations/20170714155817-notification-rule-tweak.sql"}, + {Data: dataRange(64019, 64180), Name: "migrations/20170717151241-remove-old-esc-columns.sql"}, + {Data: dataRange(64180, 65238), Name: "migrations/20170717151336-remove-old-service-columns.sql"}, + {Data: dataRange(65238, 65753), Name: "migrations/20170717151358-remove-old-tables.sql"}, + {Data: dataRange(65753, 78645), Name: "migrations/20170717152954-ids-to-uuids.sql"}, + {Data: dataRange(78645, 80552), Name: "migrations/20170724162219-fix-alert-escalations.sql"}, + {Data: dataRange(80552, 80709), Name: "migrations/20170725105059-rotations-shift-length-check.sql"}, + {Data: dataRange(80709, 86047), Name: "migrations/20170725105905-fix-shift-calculation.sql"}, + {Data: dataRange(86047, 87988), Name: "migrations/20170726141849-handle-missing-users.sql"}, + {Data: dataRange(87988, 93335), Name: "migrations/20170726143800-no-oncall-for-future-rotations.sql"}, + {Data: dataRange(93335, 93655), Name: "migrations/20170726155056-twilio-sms-errors.sql"}, + {Data: dataRange(93655, 93981), Name: "migrations/20170726155351-twilio-voice-errors.sql"}, + {Data: dataRange(93981, 94823), Name: "migrations/20170802114735-alert_logs_enum_update.sql"}, + {Data: dataRange(94823, 99730), Name: "migrations/20170802160314-add-timezones.sql"}, + 
{Data: dataRange(99730, 99978), Name: "migrations/20170808110638-user-email-nullable-allowed.sql"}, + {Data: dataRange(99978, 101623), Name: "migrations/20170811110036-add-generic-integration-key.sql"}, + {Data: dataRange(101623, 108314), Name: "migrations/20170817102712-atomic-escalation-policies.sql"}, + {Data: dataRange(108314, 108458), Name: "migrations/20170818135106-add-gravatar-col-to-user.sql"}, + {Data: dataRange(108458, 109936), Name: "migrations/20170825124926-escalation-policy-step-reorder.sql"}, + {Data: dataRange(109936, 111739), Name: "migrations/20171024114842-adjust-notification-create-at-check.sql"}, + {Data: dataRange(111739, 113324), Name: "migrations/20171027145352-dont-notify-disabled-cms.sql"}, + {Data: dataRange(113324, 120715), Name: "migrations/20171030130758-ev3-drop-views.sql"}, + {Data: dataRange(120715, 121690), Name: "migrations/20171030130759-ev3-schedule-rules.sql"}, + {Data: dataRange(121690, 123041), Name: "migrations/20171030130800-ev3-notification-policy.sql"}, + {Data: dataRange(123041, 124929), Name: "migrations/20171030130801-ev3-escalation-policy-state.sql"}, + {Data: dataRange(124929, 125345), Name: "migrations/20171030130802-ev3-rotations.sql"}, + {Data: dataRange(125345, 126351), Name: "migrations/20171030130804-ev3-assign-schedule-rotations.sql"}, + {Data: dataRange(126351, 127787), Name: "migrations/20171030130806-ev3-add-rotation-ep-action.sql"}, + {Data: dataRange(127787, 128661), Name: "migrations/20171030130810-ev3-notification-logs.sql"}, + {Data: dataRange(128661, 129474), Name: "migrations/20171030130811-ev3-drop-ep-snapshot-trigger.sql"}, + {Data: dataRange(129474, 131255), Name: "migrations/20171030130812-ev3-rotation-state.sql"}, + {Data: dataRange(131255, 132323), Name: "migrations/20171030130813-ev3-throttle-locks.sql"}, + {Data: dataRange(132323, 133610), Name: "migrations/20171030150519-ev3-remove-status-trigger.sql"}, + {Data: dataRange(133610, 134267), Name: 
"migrations/20171126093536-schedule-rule-processing.sql"}, + {Data: dataRange(134267, 136025), Name: "migrations/20171201104359-structured-alert-logs.sql"}, + {Data: dataRange(136025, 136378), Name: "migrations/20171201104433-add-alert-log-types.sql"}, + {Data: dataRange(136378, 136980), Name: "migrations/20171205125227-twilio-egress-sms-tracking.sql"}, + {Data: dataRange(136980, 137621), Name: "migrations/20171211101108-twilio-egress-voice-tracking.sql"}, + {Data: dataRange(137621, 137732), Name: "migrations/20171213141802-add-alert-source-email.sql"}, + {Data: dataRange(137732, 138752), Name: "migrations/20171220113439-add-alert-dedup-keys.sql"}, + {Data: dataRange(138752, 139258), Name: "migrations/20171221134500-limit-configuration.sql"}, + {Data: dataRange(139258, 140421), Name: "migrations/20171221138101-notification-rule-limit.sql"}, + {Data: dataRange(140421, 141526), Name: "migrations/20171221140906-contact-method-limit.sql"}, + {Data: dataRange(141526, 142629), Name: "migrations/20171221142234-ep-step-limit.sql"}, + {Data: dataRange(142629, 143788), Name: "migrations/20171221142553-ep-step-action-limit.sql"}, + {Data: dataRange(143788, 144941), Name: "migrations/20171221150317-rotation-participant-limit.sql"}, + {Data: dataRange(144941, 146003), Name: "migrations/20171221150825-schedule-rule-limit.sql"}, + {Data: dataRange(146003, 147120), Name: "migrations/20171221150955-integration-key-limit.sql"}, + {Data: dataRange(147120, 148176), Name: "migrations/20171221151358-unacked-alert-limit.sql"}, + {Data: dataRange(148176, 148731), Name: "migrations/20171221162356-case-insenstive-name-constraints.sql"}, + {Data: dataRange(148731, 149926), Name: "migrations/20180103113251-schedule-target-limit.sql"}, + {Data: dataRange(149926, 150585), Name: "migrations/20180104114110-disable-process-alerts-queue.sql"}, + {Data: dataRange(150585, 151266), Name: "migrations/20180104122450-wait-alert-queue-finished.sql"}, + {Data: dataRange(151266, 153980), Name: 
"migrations/20180104123517-outgoing-messages.sql"}, + {Data: dataRange(153980, 155734), Name: "migrations/20180104124640-ncycle-tick.sql"}, + {Data: dataRange(155734, 156138), Name: "migrations/20180104125444-twilio-sms-multiple-callbacks.sql"}, + {Data: dataRange(156138, 156258), Name: "migrations/20180109114058-email-integration-key.sql"}, + {Data: dataRange(156258, 157807), Name: "migrations/20180110155110-alert-unique-dedup-service.sql"}, + {Data: dataRange(157807, 157946), Name: "migrations/20180117110856-status-update-message-type.sql"}, + {Data: dataRange(157946, 160364), Name: "migrations/20180117115123-alert-status-updates.sql"}, + {Data: dataRange(160364, 162217), Name: "migrations/20180118112019-restrict-cm-to-same-user.sql"}, + {Data: dataRange(162217, 162350), Name: "migrations/20180126162030-heartbeat-auth-log-subject-type.sql"}, + {Data: dataRange(162350, 162940), Name: "migrations/20180126162093-heartbeats.sql"}, + {Data: dataRange(162940, 163476), Name: "migrations/20180126162144-heartbeat-auth-log-data.sql"}, + {Data: dataRange(163476, 163609), Name: "migrations/20180130123755-heartbeat-limit-key.sql"}, + {Data: dataRange(163609, 164728), Name: "migrations/20180130123852-heartbeat-limit.sql"}, + {Data: dataRange(164728, 165617), Name: "migrations/20180201180221-add-verification-code.sql"}, + {Data: dataRange(165617, 169732), Name: "migrations/20180207113632-ep-step-number-consistency.sql"}, + {Data: dataRange(169732, 172568), Name: "migrations/20180207124220-rotation-participant-position-consistency.sql"}, + {Data: dataRange(172568, 174020), Name: "migrations/20180216104945-alerts-split-summary-details.sql"}, + {Data: dataRange(174020, 174150), Name: "migrations/20180228103159-schedule-overrides-limit-key.sql"}, + {Data: dataRange(174150, 176847), Name: "migrations/20180228111204-schedule-overrides.sql"}, + {Data: dataRange(176847, 177302), Name: "migrations/20180313152132-schedule-on-call-users.sql"}, + {Data: dataRange(177302, 183428), Name: 
"migrations/20180315113303-strict-rotation-state.sql"}, + {Data: dataRange(183428, 183947), Name: "migrations/20180320153326-npcycle-indexes.sql"}, + {Data: dataRange(183947, 185568), Name: "migrations/20180321143255-ep-step-count.sql"}, + {Data: dataRange(185568, 188460), Name: "migrations/20180321145054-strict-ep-state.sql"}, + {Data: dataRange(188460, 189517), Name: "migrations/20180326154252-move-rotation-triggers.sql"}, + {Data: dataRange(189517, 190075), Name: "migrations/20180330110116-move-ep-triggers.sql"}, + {Data: dataRange(190075, 192448), Name: "migrations/20180403113645-fix-rot-part-delete.sql"}, + {Data: dataRange(192448, 192581), Name: "migrations/20180417142940-region-processing.sql"}, + {Data: dataRange(192581, 193174), Name: "migrations/20180517100033-clear-cycles-on-policy-change.sql"}, + {Data: dataRange(193174, 194603), Name: "migrations/20180517135700-policy-reassignment-trigger-fix.sql"}, + {Data: dataRange(194603, 196827), Name: "migrations/20180517210000-auth2.sql"}, + {Data: dataRange(196827, 197112), Name: "migrations/20180517220000-keyring.sql"}, + {Data: dataRange(197112, 197288), Name: "migrations/20180517230000-auth-nonce.sql"}, + {Data: dataRange(197288, 197561), Name: "migrations/20180521124533-UserFavorites.sql"}, + {Data: dataRange(197561, 198185), Name: "migrations/20180710110438-engine-processing-versions.sql"}, + {Data: dataRange(198185, 198547), Name: "migrations/20180720121433-increment-module-versions.sql"}, + {Data: dataRange(198547, 199244), Name: "migrations/20180720121533-drop-dedup-trigger.sql"}, + {Data: dataRange(199244, 200439), Name: "migrations/20180720121633-drop-description-col.sql"}, + {Data: dataRange(200439, 202856), Name: "migrations/20180720121733-fix-svc-ep-state-trigger.sql"}, + {Data: dataRange(202856, 205286), Name: "migrations/20180720121833-create-ep-state-on-alert.sql"}, + {Data: dataRange(205286, 205562), Name: "migrations/20180720121933-store-next-escalation-time.sql"}, + {Data: dataRange(205562, 
206041), Name: "migrations/20180720122033-ep-step-on-call.sql"}, + {Data: dataRange(206041, 206688), Name: "migrations/20180720122133-clear-next-esc-on-ack.sql"}, + {Data: dataRange(206688, 206965), Name: "migrations/20180720122233-drop-unique-cycles-constraint.sql"}, + {Data: dataRange(206965, 207418), Name: "migrations/20180720122333-fix-schedule-index.sql"}, + {Data: dataRange(207418, 208766), Name: "migrations/20180720122433-trig-alert-on-force-escalation.sql"}, + {Data: dataRange(208766, 209365), Name: "migrations/20180720122533-drop-ep-state-np-trig.sql"}, + {Data: dataRange(209365, 210930), Name: "migrations/20180720122633-update-existing-escalations.sql"}, + {Data: dataRange(210930, 211500), Name: "migrations/20180728150427-add-provider-msg-id.sql"}, + {Data: dataRange(211500, 212065), Name: "migrations/20180803090205-drop-alert-assignments.sql"}, + {Data: dataRange(212065, 212947), Name: "migrations/20180803090305-drop-alert-escalation-policy-snapshots.sql"}, + {Data: dataRange(212947, 213708), Name: "migrations/20180803090405-drop-notification-logs.sql"}, + {Data: dataRange(213708, 214432), Name: "migrations/20180803090505-drop-process-alerts.sql"}, + {Data: dataRange(214432, 215046), Name: "migrations/20180803090605-drop-process-rotations.sql"}, + {Data: dataRange(215046, 215658), Name: "migrations/20180803090705-drop-process-schedules.sql"}, + {Data: dataRange(215658, 216749), Name: "migrations/20180803090805-drop-sent-notifications.sql"}, + {Data: dataRange(216749, 217098), Name: "migrations/20180803090905-drop-throttle.sql"}, + {Data: dataRange(217098, 218056), Name: "migrations/20180803091005-drop-user-contact-method-locks.sql"}, + {Data: dataRange(218056, 218659), Name: "migrations/20180803110851-drop-twilio-egress-sms-status.sql"}, + {Data: dataRange(218659, 219301), Name: "migrations/20180803110859-drop-twilio-egress-voice-status.sql"}, + {Data: dataRange(219301, 219495), Name: "migrations/20180806092512-incr-message-version.sql"}, + {Data: 
dataRange(219495, 219837), Name: "migrations/20180806102513-drop-twilio-voice-callbacks.sql"}, + {Data: dataRange(219837, 220304), Name: "migrations/20180806102620-drop-user-notification-cycles.sql"}, + {Data: dataRange(220304, 221058), Name: "migrations/20180806102708-drop-auth-github-users.sql"}, + {Data: dataRange(221058, 221383), Name: "migrations/20180806102923-drop-auth-token-codes.sql"}, + {Data: dataRange(221383, 221773), Name: "migrations/20180816094955-switchover-state.sql"}, + {Data: dataRange(221773, 223970), Name: "migrations/20180816095055-add-row-ids.sql"}, + {Data: dataRange(223970, 238303), Name: "migrations/20180816095155-change-log.sql"}, + {Data: dataRange(238303, 238525), Name: "migrations/20180816164203-drop-end-time-check.sql"}, + {Data: dataRange(238525, 238757), Name: "migrations/20180821150330-deferable-status-cm.sql"}, + {Data: dataRange(238757, 239305), Name: "migrations/20180822153707-defer-rotation-state.sql"}, + {Data: dataRange(239305, 239505), Name: "migrations/20180822153914-defer-ep-state.sql"}, + {Data: dataRange(239505, 240050), Name: "migrations/20180831132457-user-last-alert-log-indexes.sql"}, + {Data: dataRange(240050, 240269), Name: "migrations/20180831132707-alerts-service-index.sql"}, + {Data: dataRange(240269, 240509), Name: "migrations/20180831132743-np-cycle-alert-index.sql"}, + {Data: dataRange(240509, 241256), Name: "migrations/20180831132927-alert-logs-index.sql"}, + {Data: dataRange(241256, 241656), Name: "migrations/20180831143308-outgoing-messages-index.sql"}, + {Data: dataRange(241656, 242472), Name: "migrations/20180907111203-schedule-rule-endtime-fix.sql"}, + {Data: dataRange(242472, 242833), Name: "migrations/20180918102226-add-service-label.sql"}, + {Data: dataRange(242833, 243211), Name: "migrations/20181004032148-labels-switchover-trigger.sql"}, + {Data: dataRange(243211, 245209), Name: "migrations/20181004145558-fix-deleting-participants.sql"}, + {Data: dataRange(245209, 246444), Name: 
"migrations/20181008111401-twilio-sms-short-reply.sql"}, + {Data: dataRange(246444, 248612), Name: "migrations/20181018131939-fix-rotation-deletions.sql"}, + {Data: dataRange(248612, 249015), Name: "migrations/20181107133329-notification-channels.sql"}, + {Data: dataRange(249015, 249938), Name: "migrations/20181107155035-nc-id-to-ep-action.sql"}, + {Data: dataRange(249938, 250545), Name: "migrations/20181107155229-om-notification-channel.sql"}, + {Data: dataRange(250545, 250906), Name: "migrations/20190117130422-notif-chan-engine-versions.sql"}, + {Data: dataRange(250906, 251157), Name: "migrations/20190129110250-add-cleanup-module.sql"}, + {Data: dataRange(251157, 251527), Name: "migrations/20190201104727-alert-logs-channel.sql"}, + {Data: dataRange(251527, 252332), Name: "migrations/20190201142137-drop-sub-constraint.sql"}, + {Data: dataRange(252332, 252740), Name: "migrations/20190225112925-config-table.sql"}, + {Data: dataRange(252740, 253205), Name: "migrations/20190312153204-slack-api-change.sql"}, + {Data: dataRange(253205, 253408), Name: "migrations/20190313125552-slack-user-link.sql"}, + {Data: dataRange(253408, 253724), Name: "migrations/20190404105850-nc-no-meta.sql"}, + {Data: dataRange(253724, 254211), Name: "migrations/20190517144224-trigger-config-sync.sql"}, + } +} diff --git a/migrate/inline_types_gen.go b/migrate/inline_types_gen.go new file mode 100644 index 0000000000..bbcf4e7298 --- /dev/null +++ b/migrate/inline_types_gen.go @@ -0,0 +1,10 @@ +// Code generated by inliner DO NOT EDIT. 
+ +package migrate + +type File struct { + Name string + Data func() []byte +} + +var Files []File diff --git a/migrate/migrate.go b/migrate/migrate.go new file mode 100644 index 0000000000..90b27bf49a --- /dev/null +++ b/migrate/migrate.go @@ -0,0 +1,342 @@ +package migrate + +//go:generate go run ../devtools/inliner -pkg $GOPACKAGE ./migrations/*.sql + +import ( + "bytes" + "context" + "database/sql" + "github.com/target/goalert/lock" + "github.com/target/goalert/util/log" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/lib/pq" + "github.com/pkg/errors" + "github.com/rubenv/sql-migrate/sqlparse" +) + +// Names will return all AssetNames without the timestamps and extensions +func Names() []string { + uniq := make(map[string]struct{}) + var names []string + // Strip off "migrations/timestamp" and ".sql" file extension + for _, b := range Files { + name := migrationName(b.Name) + if _, ok := uniq[name]; ok { + panic("duplicate migation name " + name) + } + uniq[name] = struct{}{} + + names = append(names, migrationName(b.Name)) + } + return names +} + +func migrationName(file string) string { + file = strings.TrimPrefix(file, "migrations/") + // trim the timestamp, including the trailing hyphen + // Example : 20170808110638-user-email.sql would become user-email.sql + file = file[15:] + file = strings.TrimSuffix(file, ".sql") + return file +} +func migrationID(name string) (int, string) { + for i, b := range Files { + if migrationName(b.Name) == name { + return i, strings.TrimPrefix(b.Name, "migrations/") + } + } + return -1, "" +} + +// ApplyAll will atomically perform all UP migrations. 
+func ApplyAll(ctx context.Context, db *sql.DB) (int, error) { + return Up(ctx, db, "") +} + +func getConn(ctx context.Context, db *sql.DB) (*sql.Conn, error) { + c, err := db.Conn(ctx) + if err != nil { + return nil, errors.Wrap(err, "get db conn") + } + + _, err = c.ExecContext(ctx, `set lock_timeout = 15000`) + if err != nil { + releaseConn(c) + return nil, errors.Wrap(err, "set lock timeout") + } + + for { + _, err = c.ExecContext(ctx, `select pg_advisory_lock($1)`, lock.GlobalMigrate) + if err == nil { + return c, nil + } + // 55P03 is lock_not_available + // https://www.postgresql.org/docs/9.6/static/errcodes-appendix.html + // + // If the lock gets a timeout, terminate stale backends and try again. + if pErr, ok := err.(*pq.Error); ok && pErr.Code == "55P03" { + log.Log(ctx, errors.Wrap(err, "get migration lock (will retry)")) + _, err = c.ExecContext(ctx, ` + select pg_terminate_backend(l.pid) + from pg_locks l + join pg_stat_activity act on act.pid = l.pid and state = 'idle' and state_change < now() - '30 seconds'::interval + where locktype = 'advisory' and objid = $1 and granted + `, lock.GlobalMigrate) + if err != nil { + releaseConn(c) + return nil, errors.Wrap(err, "terminate stale backends") + } + continue + } + + releaseConn(c) + return nil, errors.Wrap(err, "get migration lock") + } + +} +func releaseConn(c *sql.Conn) { + c.ExecContext(context.Background(), `select pg_advisory_unlock($1)`, lock.GlobalMigrate) + c.ExecContext(context.Background(), `set lock_timeout to default`) + c.Close() +} + +func ensureTableQuery(ctx context.Context, db *sql.DB, fn func() error) error { + err := fn() + if err == nil { + return nil + } + // 42P01 is undefined_table + // https://www.postgresql.org/docs/9.6/static/errcodes-appendix.html + if pErr, ok := err.(*pq.Error); !ok || pErr.Code != "42P01" { + return err + } + + c, err := getConn(ctx, db) + if err != nil { + return err + } + defer releaseConn(c) + _, err = c.ExecContext(ctx, ` + CREATE TABLE IF NOT EXISTS 
gorp_migrations ( + id text PRIMARY KEY, + applied_at timestamp with time zone + ) + `) + if err != nil { + return err + } + return fn() +} + +// Up will apply all migrations up to, and including, targetName. +// If targetName is empty, all available migrations are applied. +func Up(ctx context.Context, db *sql.DB, targetName string) (int, error) { + if targetName == "" { + targetName = migrationName(Files[len(Files)-1].Name) + } + targetIndex, targetID := migrationID(targetName) + if targetIndex == -1 { + return 0, errors.Errorf("unknown migration target name '%s'", targetName) + } + + var hasLatest bool + err := ensureTableQuery(ctx, db, func() error { + return db.QueryRowContext(ctx, `select true from gorp_migrations where id = $1`, targetID).Scan(&hasLatest) + }) + if err == nil && hasLatest { + return 0, nil + } + if err != nil && err != sql.ErrNoRows { + return 0, err + } + + migrations, err := parseMigrations() + if err != nil { + return 0, err + } + + c, err := getConn(ctx, db) + if err != nil { + return 0, err + } + defer releaseConn(c) + + rows, err := c.QueryContext(ctx, `select id from gorp_migrations order by id`) + if err != nil && err != sql.ErrNoRows { + return 0, err + } + defer rows.Close() + + i := -1 + for rows.Next() { + i++ + var id string + err = rows.Scan(&id) + if err != nil { + return 0, errors.Wrap(err, "scan applied migrations") + } + if strings.TrimPrefix(Files[i].Name, "migrations/") != id { + return 0, errors.Errorf("migration mismatch db has '%s' but expected '%s'", id, strings.TrimPrefix(Files[i].Name, "migrations/")) + } + } + + return performMigrations(ctx, c, true, migrations[i+1:targetIndex+1]) +} + +// Down will roll back all migrations up to, but NOT including, targetName. +// +// If the DB contains unknown migrations, err is returned. 
+func Down(ctx context.Context, db *sql.DB, targetName string) (int, error) { + targetIndex, targetID := migrationID(targetName) + if targetIndex == -1 { + return 0, errors.Errorf("unknown migration target name '%s'", targetName) + } + + var latest string + err := ensureTableQuery(ctx, db, func() error { + return db.QueryRowContext(ctx, `select id from gorp_migrations order by id desc limit 1`).Scan(&latest) + }) + if err != nil { + return 0, err + } + if latest == targetID { + return 0, nil + } + + migrations, err := parseMigrations() + if err != nil { + return 0, err + } + byID := make(map[string]migration) + for _, m := range migrations { + byID[m.ID] = m + } + + c, err := getConn(ctx, db) + if err != nil { + return 0, err + } + defer releaseConn(c) + rows, err := c.QueryContext(ctx, `select id from gorp_migrations where id > $1 order by id desc`, targetID) + if err != nil { + return 0, err + } + defer rows.Close() + migrations = migrations[:0] + for rows.Next() { + var id string + err = rows.Scan(&id) + if err != nil { + return 0, err + } + m, ok := byID[id] + if !ok { + return 0, errors.Errorf("could not find db migration '%s' to roll back", id) + } + migrations = append(migrations, m) + } + + return performMigrations(ctx, c, false, migrations) +} + +// DumpMigrations will attempt to write all migration files to the specified directory +func DumpMigrations(dest string) error { + for _, file := range Files { + fullPath := filepath.Join(dest, filepath.Base(file.Name)) + os.MkdirAll(filepath.Dir(fullPath), 0755) + err := ioutil.WriteFile(fullPath, file.Data(), 0644) + if err != nil { + return errors.Wrapf(err, "write to %s", fullPath) + } + } + return nil +} + +type migration struct { + ID string + Name string + *sqlparse.ParsedMigration +} + +func parseMigrations() ([]migration, error) { + var migrations []migration + var err error + for _, file := range Files { + var m migration + m.ID = strings.TrimPrefix(file.Name, "migrations/") + m.Name = 
migrationName(file.Name) + m.ParsedMigration, err = sqlparse.ParseMigration(bytes.NewReader(file.Data())) + if err != nil { + return nil, errors.Wrapf(err, "parse %s", m.ID) + } + + migrations = append(migrations, m) + } + return migrations, nil +} + +func (m migration) apply(ctx context.Context, c *sql.Conn, up bool) (err error) { + var tx *sql.Tx + type execer interface { + ExecContext(context.Context, string, ...interface{}) (sql.Result, error) + } + s := time.Now() + typ := "UP" + if !up { + typ = "DOWN" + } + ex := execer(c) + if up && !m.DisableTransactionUp || !up && !m.DisableTransactionDown { + tx, err = c.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + ex = tx + } + + stmts := m.UpStatements + if !up { + stmts = m.DownStatements + } + for _, s := range stmts { + _, err = ex.ExecContext(ctx, s) + if err != nil { + return err + } + } + + if up { + _, err = ex.ExecContext(ctx, `insert into gorp_migrations (id, applied_at) values ($1, now())`, m.ID) + } else { + _, err = ex.ExecContext(ctx, `delete from gorp_migrations where id = $1`, m.ID) + } + if err != nil { + return err + } + + if tx != nil { + err = tx.Commit() + if err != nil { + return err + } + } + + log.Debugf(ctx, "Applied %s migration '%s' in %s", typ, m.Name, time.Since(s).Truncate(time.Millisecond)) + + return nil +} +func performMigrations(ctx context.Context, c *sql.Conn, applyUp bool, migrations []migration) (int, error) { + for i, m := range migrations { + err := m.apply(ctx, c, applyUp) + if err != nil { + return i, err + } + } + return len(migrations), nil +} diff --git a/migrate/migrations/20170426134008-init.sql b/migrate/migrations/20170426134008-init.sql new file mode 100644 index 0000000000..dc4f99ae99 --- /dev/null +++ b/migrate/migrations/20170426134008-init.sql @@ -0,0 +1,217 @@ +-- +migrate Up + +CREATE TABLE goalert_user ( + id UUID PRIMARY KEY, + bio TEXT, + first_name TEXT, + last_name TEXT, + login TEXT UNIQUE, + email TEXT UNIQUE, + email_verified 
BOOLEAN NOT NULL default false, + role TEXT, + schedule_color TEXT, + time_zone TEXT, + title TEXT +); +-- type can be PUSH, EMAIL, VOICE, SMS +-- carrier can be ATT, VERIZON, SPRINT, TMOBILE, FI, or NULL, if set is used to send SMS via email +CREATE TABLE contact ( + id TEXT PRIMARY KEY, + name TEXT, + type TEXT, + value TEXT, + carrier TEXT, + opt_out BOOLEAN DEFAULT false, + user_id UUID REFERENCES goalert_user (id) +); +CREATE TABLE team ( + id TEXT PRIMARY KEY, + description TEXT, + name TEXT +); +CREATE TABLE team_user ( + id TEXT PRIMARY KEY, + team_id TEXT REFERENCES team(id), + user_id UUID REFERENCES goalert_user (id) +); +CREATE TABLE escalation_policy ( + id TEXT PRIMARY KEY, + description TEXT, + name TEXT, + repeat INTEGER, + team_id TEXT REFERENCES team(id) +); +-- urgency_rule can be HIGH, LOW, HIGH_LOW, LOW_HIGH +CREATE TABLE service ( + id TEXT PRIMARY KEY, + created_at TIMESTAMPTZ, + description TEXT, + summary TEXT, + type TEXT, + self TEXT, + html_url TEXT, + status TEXT, + last_incident_timestamp TIMESTAMPTZ, + conference_url TEXT, + dialin_number TEXT, + name TEXT, + acknowledgement_timeout INTEGER, + auto_resolve_timeout INTEGER, + maintenance_mode BOOLEAN, + escalation_policy_id TEXT REFERENCES escalation_policy(id), + incident_urgency_type TEXT, + incident_urgency_value TEXT +); + +CREATE SEQUENCE IF NOT EXISTS incident_number_seq; +-- event_type can be TRIGGER, ACKNOWLEDGE or RESOLVE +-- URGENCY can be HIGH or LOW +CREATE TABLE incident ( + id TEXT PRIMARY KEY, + number INTEGER DEFAULT NEXTVAL('incident_number_seq'), + key TEXT, -- this functions as an alias for identifying external systems for dedupe purposes + event_type TEXT, + created_at TIMESTAMPTZ, + description TEXT, + details JSON, + client TEXT, + client_url TEXT, + contexts JSON, + status TEXT, + urgency TEXT, + resolution TEXT, + try_count INTEGER, + escalation_level INTEGER, + service_id TEXT REFERENCES service(id), + escalation_policy_id TEXT REFERENCES escalation_policy(id) 
+); +CREATE TABLE incident_assignment ( + id TEXT PRIMARY KEY, + assigned_by TEXT, -- system or escalation_policy + user_id UUID REFERENCES goalert_user(id), + incident_id TEXT REFERENCES incident(id) +); +CREATE TABLE alert ( + id TEXT PRIMARY KEY, + created_at TIMESTAMPTZ, + phone_number TEXT, + channel TEXT, + acknowledge_key INT, + resolve_key INT, + status TEXT, + incident_ids TEXT ARRAY, + user_id UUID REFERENCES goalert_user(id) +); +CREATE TABLE maintenance ( + id TEXT PRIMARY KEY, + start_at TIMESTAMPTZ, + end_at TIMESTAMPTZ, + description TEXT, + created_by UUID REFERENCES goalert_user(id) +); +CREATE TABLE service_maintenance ( + id TEXT PRIMARY KEY, + service_id TEXT REFERENCES service(id), + maintenance_id TEXT REFERENCES maintenance(id) +); +CREATE TABLE escalation_policy_step ( + id TEXT PRIMARY KEY, + delay INTEGER, + step_number INTEGER, + escalation_policy_id TEXT REFERENCES escalation_policy(id) +); +--state can be TRIGGERED, ACKNOWLEDGED, RESOLVED +--action can be TRIGGERED, ACKNOWLEDGED, RESOLVED, ESCALATED, NOTIFIED, ASSIGNED +CREATE TABLE incident_log ( + id TEXT PRIMARY KEY, + created_at TIMESTAMPTZ, + state TEXT, + action TEXT, + incident_id TEXT REFERENCES incident(id) +); +CREATE TABLE schedule ( + id TEXT PRIMARY KEY, + created_at TIMESTAMPTZ, + description TEXT, + name TEXT, + time_zone INTEGER -- hours east of UTC, e.g. 
-6 for CST +); +CREATE TABLE schedule_layer ( + id TEXT PRIMARY KEY, + created_at TIMESTAMPTZ, + effective_date TIMESTAMP, + description TEXT, + handoff_day INTEGER, -- day, 0 -> 6 + handoff_time TEXT, -- start time, 00:00 -> 23:30 + name TEXT, + rotation_type TEXT, -- daily, weekly, or custom + shift_length INTEGER, -- for custom shift length amount + shift_length_unit TEXT, -- for custom shift length units (hours, days, weeks) + schedule_id TEXT REFERENCES schedule(id) +); +CREATE TABLE schedule_layer_user ( + id TEXT PRIMARY KEY, + created_at TIMESTAMPTZ, + step_number INTEGER, -- starts at 0 + user_id UUID REFERENCES goalert_user(id), + schedule_layer_id TEXT REFERENCES schedule_layer(id) +); +CREATE TABLE escalation_policy_action ( + id TEXT PRIMARY KEY, + escalation_policy_step_id TEXT REFERENCES escalation_policy_step(id), + type_id TEXT, --user or schedule id + type_text TEXT --can be user_reference or schedule_reference +); +CREATE TABLE integration ( + id TEXT PRIMARY KEY, + type TEXT, -- EMAIL, API, or CUSTOM + name TEXT, -- give it a label + integration_key TEXT UNIQUE, -- the actual key, or email address for type EMAIL + created_at TIMESTAMPTZ, -- auto generated by system + service_id TEXT REFERENCES service(id) +); +CREATE TABLE notification_rule ( + id TEXT PRIMARY KEY, + delay INTEGER, + user_id UUID REFERENCES goalert_user (id), + contact_id TEXT REFERENCES contact(id) +); + +CREATE TABLE auth_basic_users ( + user_id UUID REFERENCES goalert_user (id) ON DELETE CASCADE PRIMARY KEY, + username text UNIQUE NOT NULL, + password_hash text NOT NULL +); + +CREATE TABLE auth_github_users ( + user_id UUID REFERENCES goalert_user (id) ON DELETE CASCADE PRIMARY KEY, + github_id text UNIQUE NOT NULL +); + +-- +migrate Down + +DROP TABLE IF EXISTS notification_rule; +DROP TABLE IF EXISTS integration; +DROP TABLE IF EXISTS escalation_policy_action; +DROP TABLE IF EXISTS schedule_layer_user; +DROP TABLE IF EXISTS schedule_layer; +DROP TABLE IF EXISTS schedule; 
+DROP TABLE IF EXISTS service_maintenance; +DROP TABLE IF EXISTS maintenance; +DROP TABLE IF EXISTS incident_log; +DROP TABLE IF EXISTS escalation_policy_step; +DROP TABLE IF EXISTS alert; +DROP TABLE IF EXISTS incident_assignment; +DROP TABLE IF EXISTS incident; +DROP SEQUENCE IF EXISTS incident_number_seq; +DROP TABLE IF EXISTS service; +DROP TABLE IF EXISTS escalation_policy; +DROP TABLE IF EXISTS user_role; +DROP TABLE IF EXISTS goalert_role; +DROP TABLE IF EXISTS team_user; +DROP TABLE IF EXISTS team; +DROP TABLE IF EXISTS contact; +DROP TABLE IF EXISTS auth_basic_users; +DROP TABLE IF EXISTS auth_github_users; +DROP TABLE IF EXISTS goalert_user CASCADE; + diff --git a/migrate/migrations/20170428154209-users-table.sql b/migrate/migrations/20170428154209-users-table.sql new file mode 100644 index 0000000000..48be8bbd80 --- /dev/null +++ b/migrate/migrations/20170428154209-users-table.sql @@ -0,0 +1,73 @@ + +-- +migrate Up + +DROP TYPE IF EXISTS enum_user_role; +CREATE TYPE enum_user_role as ENUM ( + 'unknown', + 'user', + 'admin' +); + +ALTER TABLE goalert_user + RENAME TO users; + +UPDATE users + SET bio = CASE + WHEN bio IS NULL THEN '' + ELSE bio + END; +ALTER TABLE users ALTER COLUMN bio SET NOT NULL; +ALTER TABLE users ALTER COLUMN bio SET DEFAULT ''; + +UPDATE users + SET role = CASE + WHEN role IS NULL THEN 'user' + WHEN role='' THEN 'user' + ELSE role + END; +ALTER TABLE users ALTER COLUMN role TYPE enum_user_role USING role::enum_user_role; +ALTER TABLE users ALTER COLUMN role SET NOT NULL; +ALTER TABLE users ALTER COLUMN role SET DEFAULT 'unknown'::enum_user_role; + +ALTER TABLE users ALTER COLUMN email SET NOT NULL; + +ALTER TABLE users DROP COLUMN login; +ALTER TABLE users DROP COLUMN schedule_color; +ALTER TABLE users DROP COLUMN time_zone; +ALTER TABLE users DROP COLUMN title; + +ALTER TABLE users ADD COLUMN name TEXT; +UPDATE users SET name = LTRIM(CASE + WHEN first_name IS NULL THEN '' + ELSE first_name + END||CASE + WHEN last_name IS NULL THEN 
'' + WHEN last_name='' THEN '' + ELSE ' '||last_name + END); +ALTER TABLE users ALTER COLUMN name SET NOT NULL; + +ALTER TABLE users DROP COLUMN last_name; +ALTER TABLE users DROP COLUMN first_name; + + +-- +migrate Down + +ALTER TABLE users RENAME TO goalert_user; + +ALTER TABLE goalert_user ALTER COLUMN bio DROP NOT NULL; +ALTER TABLE goalert_user ALTER COLUMN role DROP NOT NULL; +ALTER TABLE goalert_user ALTER COLUMN role TYPE text USING role::text; +ALTER TABLE goalert_user ALTER COLUMN role SET DEFAULT 'user'::text; + +ALTER TABLE goalert_user ADD COLUMN login TEXT UNIQUE; +ALTER TABLE goalert_user ADD COLUMN schedule_color TEXT; +ALTER TABLE goalert_user ADD COLUMN time_zone TEXT; +ALTER TABLE goalert_user ADD COLUMN title TEXT; +ALTER TABLE goalert_user ADD COLUMN last_name TEXT DEFAULT ''; +ALTER TABLE goalert_user ADD COLUMN first_name TEXT DEFAULT ''; + +UPDATE goalert_user SET first_name = name; +ALTER TABLE goalert_user DROP COLUMN name; + +DROP TYPE enum_user_role; diff --git a/migrate/migrations/20170502172843-user-settings.sql b/migrate/migrations/20170502172843-user-settings.sql new file mode 100644 index 0000000000..583cae61be --- /dev/null +++ b/migrate/migrations/20170502172843-user-settings.sql @@ -0,0 +1,79 @@ + +-- +migrate Up + +DROP TYPE IF EXISTS enum_user_contact_method_type; +CREATE TYPE enum_user_contact_method_type as ENUM ( + 'PUSH', + 'EMAIL', + 'VOICE', + 'SMS' +); + +DROP TYPE IF EXISTS enum_user_contact_method_carrier; +CREATE TYPE enum_user_contact_method_carrier as ENUM ( + 'ATT', + 'VERIZON', + 'SPRINT', + 'TMOBILE', + 'FI' +); + +CREATE TABLE user_contact_methods ( + id UUID PRIMARY KEY, + name TEXT NOT NULL, + type enum_user_contact_method_type NOT NULL, + value TEXT NOT NULL, + carrier enum_user_contact_method_carrier, + disabled BOOLEAN NOT NULL DEFAULT false, + user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE +); + +CREATE TABLE user_notification_rules ( + id UUID PRIMARY KEY, + delay INT NOT NULL DEFAULT 0, + 
contact_method_id UUID NOT NULL REFERENCES user_contact_methods (id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE +); + + +CREATE EXTENSION IF NOT EXISTS "pgcrypto"; + +INSERT INTO user_contact_methods (id, name, type, value, carrier, disabled, user_id) + SELECT id::UUID, name, type::enum_user_contact_method_type, value, carrier::enum_user_contact_method_carrier, opt_out, user_id FROM contact; + +INSERT INTO user_notification_rules (id, delay, contact_method_id, user_id) + SELECT id::UUID, delay, contact_id::UUID, notification_rule.user_id FROM notification_rule; + +DROP TABLE notification_rule; +DROP TABLE contact; + +-- +migrate Down + +CREATE TABLE contact ( + id TEXT PRIMARY KEY, + name TEXT, + type TEXT, + value TEXT, + carrier TEXT, + opt_out BOOLEAN DEFAULT false, + user_id UUID REFERENCES users (id) +); + +INSERT INTO contact (id, name, type, value, carrier, opt_out, user_id) + SELECT id::TEXT, name, type::TEXT, value, carrier::TEXT, disabled, user_id FROM user_contact_methods; + +CREATE TABLE notification_rule ( + id TEXT PRIMARY KEY, + delay INTEGER, + user_id UUID REFERENCES users (id), + contact_id TEXT REFERENCES contact(id) +); + +INSERT INTO notification_rule (id, delay, user_id, contact_id) + SELECT id::TEXT, delay, user_id, contact_method_id::TEXT FROM user_notification_rules; + +DROP TABLE user_notification_rules; +DROP TABLE user_contact_methods; + +DROP TYPE enum_user_contact_method_carrier; +DROP TYPE enum_user_contact_method_type; diff --git a/migrate/migrations/20170503144542-remove-carrier.sql b/migrate/migrations/20170503144542-remove-carrier.sql new file mode 100644 index 0000000000..b4bc3dc53a --- /dev/null +++ b/migrate/migrations/20170503144542-remove-carrier.sql @@ -0,0 +1,17 @@ + +-- +migrate Up + +ALTER TABLE user_contact_methods DROP COLUMN carrier; +DROP TYPE enum_user_contact_method_carrier; + +-- +migrate Down + +CREATE TYPE enum_user_contact_method_carrier as ENUM ( + 'ATT', + 'VERIZON', + 
'SPRINT', + 'TMOBILE', + 'FI' +); + +ALTER TABLE user_contact_methods ADD COLUMN carrier enum_user_contact_method_carrier; diff --git a/migrate/migrations/20170503144821-remove-email-verified.sql b/migrate/migrations/20170503144821-remove-email-verified.sql new file mode 100644 index 0000000000..472f77efd1 --- /dev/null +++ b/migrate/migrations/20170503144821-remove-email-verified.sql @@ -0,0 +1,8 @@ + +-- +migrate Up + +ALTER TABLE users DROP COLUMN email_verified; + +-- +migrate Down + +ALTER TABLE users ADD COLUMN email_verified BOOLEAN NOT NULL DEFAULT false; diff --git a/migrate/migrations/20170503154907-delay-minutes.sql b/migrate/migrations/20170503154907-delay-minutes.sql new file mode 100644 index 0000000000..73abae0bae --- /dev/null +++ b/migrate/migrations/20170503154907-delay-minutes.sql @@ -0,0 +1,6 @@ + +-- +migrate Up +ALTER TABLE user_notification_rules RENAME COLUMN delay TO delay_minutes; + +-- +migrate Down +ALTER TABLE user_notification_rules RENAME COLUMN delay_minutes TO delay; diff --git a/migrate/migrations/20170509154250-alerts.sql b/migrate/migrations/20170509154250-alerts.sql new file mode 100644 index 0000000000..78013c44cf --- /dev/null +++ b/migrate/migrations/20170509154250-alerts.sql @@ -0,0 +1,134 @@ + +-- +migrate Up + +CREATE EXTENSION IF NOT EXISTS pgcrypto; + +CREATE TYPE enum_alert_status as ENUM ( + 'triggered', + 'active', + 'closed' +); + +CREATE TYPE enum_alert_source as ENUM ( + 'grafana', + 'manual' +); + +CREATE TYPE enum_alert_log_event as ENUM ( + 'created', + 'reopened', + 'status_changed', + 'assignment_changed', + 'escalated', + 'closed' +); + +CREATE TABLE alerts ( + id BIGSERIAL PRIMARY KEY, + description TEXT NOT NULL, + service_id TEXT REFERENCES service (id) ON DELETE CASCADE, + source enum_alert_source NOT NULL DEFAULT 'manual'::enum_alert_source, + status enum_alert_status NOT NULL DEFAULT 'triggered'::enum_alert_status, + + escalation_level INT NOT NULL DEFAULT 0, + last_escalation TIMESTAMP DEFAULT now() 
+); + +CREATE TABLE alert_logs ( + id BIGSERIAL PRIMARY KEY, + alert_id BIGINT REFERENCES alerts (id) ON DELETE CASCADE, + timestamp TIMESTAMP DEFAULT now(), + event enum_alert_log_event NOT NULL, + message TEXT NOT NULL +); + +CREATE VIEW alert_escalation_levels AS + SELECT alerts.id AS alert_id, + count(step.id) AS levels, + ((alerts.escalation_level + 1) % count(step.id)) as relative_level + FROM alerts,escalation_policy_step step,service + WHERE step.escalation_policy_id = service.escalation_policy_id + AND service.id = alerts.service_id + GROUP BY alerts.id; + +INSERT INTO alerts (description, service_id, last_escalation, status) + SELECT description, service_id, created_at, 'active'::enum_alert_status FROM incident; + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION log_alert_status_changed_insert() RETURNS TRIGGER AS + $$ + BEGIN + IF NEW.status = 'closed'::enum_alert_status THEN + INSERT INTO alert_logs (alert_id, event, message) VALUES ( + NEW.id, 'closed'::enum_alert_log_event, 'Closed' + ); + ELSIF OLD.status = 'closed'::enum_alert_status THEN + INSERT INTO alert_logs (alert_id, event, message) VALUES ( + NEW.id, 'reopened'::enum_alert_log_event, 'Reopened as '||NEW.status::TEXT + ); + ELSE + INSERT INTO alert_logs (alert_id, event, message) VALUES ( + NEW.id, 'status_changed'::enum_alert_log_event, 'Status updated from '||OLD.status::TEXT||' to '||NEW.status::TEXT + ); + END IF; + RETURN NEW; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION log_alert_creation_insert() RETURNS TRIGGER AS + $$ + BEGIN + INSERT INTO alert_logs (alert_id, event, message) VALUES ( + NEW.id, 'created'::enum_alert_log_event, 'Created via: '||NEW.source::TEXT + ); + RETURN NEW; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION escalate_alerts() RETURNS VOID AS + $$ + BEGIN + UPDATE alerts a + SET escalation_level = escalation_level + 1, 
last_escalation = now() + FROM service s, escalation_policy_step step, alert_escalation_levels lvl, escalation_policy e + WHERE (last_escalation + (step.delay::TEXT||' minutes')::interval) < now() + AND a.status = 'triggered'::enum_alert_status + AND s.id = a.service_id + AND step.escalation_policy_id = s.escalation_policy_id + AND lvl.alert_id = a.id + AND step.step_number = ((a.escalation_level + 1) % lvl.levels) + AND e.id = s.escalation_policy_id + AND (e.repeat = -1 OR escalation_level / lvl.levels < e.repeat); + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER log_alert_status_changed + AFTER UPDATE ON alerts + FOR EACH ROW + WHEN (OLD.status IS DISTINCT FROM NEW.status) + EXECUTE PROCEDURE log_alert_status_changed_insert(); + +CREATE TRIGGER log_alert_creation + AFTER INSERT ON alerts + FOR EACH ROW + EXECUTE PROCEDURE log_alert_creation_insert(); + +-- +migrate Down + +DROP VIEW alert_escalation_levels; + +DROP TABLE alert_logs; +DROP TYPE enum_alert_log_event; + +DROP TABLE alerts; +DROP TYPE enum_alert_source; +DROP TYPE enum_alert_status; + +DROP FUNCTION log_alert_status_changed_insert(); +DROP FUNCTION log_alert_creation_insert(); diff --git a/migrate/migrations/20170515120511-escalation-policy-actions.sql b/migrate/migrations/20170515120511-escalation-policy-actions.sql new file mode 100644 index 0000000000..aebbfa31b4 --- /dev/null +++ b/migrate/migrations/20170515120511-escalation-policy-actions.sql @@ -0,0 +1,37 @@ + +-- +migrate Up + +CREATE TABLE escalation_policy_actions ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + escalation_policy_step_id TEXT NOT NULL REFERENCES escalation_policy_step (id) ON DELETE CASCADE, + schedule_id TEXT REFERENCES schedule (id) ON DELETE CASCADE, + user_id UUID REFERENCES users (id) ON DELETE CASCADE, + + UNIQUE(escalation_policy_step_id, schedule_id, user_id), + CHECK((schedule_id IS NOT NULL AND user_id IS NULL) OR (user_id IS NOT NULL AND schedule_id IS NULL)) +); + +INSERT INTO 
escalation_policy_actions (id, escalation_policy_step_id, schedule_id, user_id) + SELECT id::UUID, escalation_policy_step_id, + CASE WHEN type_text = 'schedule_reference' THEN type_id ELSE NULL END, + CASE WHEN type_text = 'user_reference' THEN type_id::UUID ELSE NULL END + FROM escalation_policy_action; + +DROP TABLE escalation_policy_action; + +-- +migrate Down + +CREATE TABLE escalation_policy_action ( + id TEXT PRIMARY KEY, + escalation_policy_step_id TEXT REFERENCES escalation_policy_step(id), + type_id TEXT, --user or schedule id + type_text TEXT --can be user_reference or schedule_reference +); + +INSERT INTO escalation_policy_action (id, escalation_policy_step_id, type_id, type_text) + SELECT id::TEXT, escalation_policy_step_id, + CASE WHEN schedule_id IS NULL THEN user_id::TEXT ELSE schedule_id END, + CASE WHEN schedule_id IS NULL THEN 'user_reference' ELSE 'schedule_reference' END + FROM escalation_policy_actions; + +DROP TABLE escalation_policy_actions; diff --git a/migrate/migrations/20170515162554-user-notifications.sql b/migrate/migrations/20170515162554-user-notifications.sql new file mode 100644 index 0000000000..4413e80be0 --- /dev/null +++ b/migrate/migrations/20170515162554-user-notifications.sql @@ -0,0 +1,146 @@ + +-- +migrate Up + +CREATE TABLE notifications ( + user_id UUID PRIMARY KEY REFERENCES users (id) ON DELETE CASCADE, + started_at TIMESTAMP NOT NULL DEFAULT now() +); + + +CREATE TABLE sent_notifications ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + alert_id BIGINT NOT NULL REFERENCES alerts (id) ON DELETE CASCADE, + contact_method_id UUID NOT NULL REFERENCES user_contact_methods (id) ON DELETE CASCADE, + sent_at TIMESTAMP, + UNIQUE(alert_id,contact_method_id) +); + +CREATE VIEW active_contact_methods AS + SELECT users.id as user_id, m.id as contact_method_id + FROM users, user_contact_methods m, user_notification_rules r, notifications n + WHERE m.user_id = users.id + AND r.user_id = users.id + AND n.user_id = users.id + AND 
r.contact_method_id = m.id + AND ((r.delay_minutes::text||' minutes')::interval + n.started_at) < now(); + +CREATE VIEW triggered_alert_users AS + SELECT action.user_id as user_id, a.id as alert_id + FROM escalation_policy_actions action, escalation_policy_step step, service s, alerts a, alert_escalation_levels lvl + WHERE action.escalation_policy_step_id = step.id + AND step.escalation_policy_id = s.escalation_policy_id + AND step.step_number = lvl.relative_level + AND s.id = a.service_id + AND a.status = 'triggered'::enum_alert_status + GROUP BY a.id, action.user_id; + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION update_notifications() RETURNS VOID AS + $$ + BEGIN + INSERT INTO notifications (user_id) + SELECT user_id FROM triggered_alert_users + GROUP BY user_id + ON CONFLICT DO NOTHING; + + DELETE FROM notifications WHERE user_id NOT IN (SELECT user_id FROM triggered_alert_users WHERE user_id = user_id); + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION add_notifications() RETURNS TRIGGER AS + $$ + BEGIN + INSERT INTO notifications (user_id) + SELECT user_id FROM triggered_alert_users + WHERE alert_id = NEW.id + LIMIT 1 + ON CONFLICT DO NOTHING; + + DELETE FROM notifications WHERE user_id NOT IN (SELECT user_id FROM triggered_alert_users WHERE user_id = user_id); + RETURN NEW; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER add_notifications_alert_changed + AFTER UPDATE OR INSERT ON alerts + FOR EACH ROW + EXECUTE PROCEDURE add_notifications(); + +SELECT update_notifications(); + + +CREATE VIEW needs_notification_sent AS + SELECT trig.alert_id, acm.contact_method_id, cm.type, cm.value, a.description, s.name as service_name + FROM active_contact_methods acm, triggered_alert_users trig, user_contact_methods cm, alerts a, service s + WHERE acm.user_id = trig.user_id + AND acm.user_id = trig.user_id + AND cm.id = acm.contact_method_id + AND cm.disabled = FALSE 
+ AND a.id = trig.alert_id + AND s.id = a.service_id + AND NOT EXISTS ( + SELECT id + FROM sent_notifications + WHERE alert_id = trig.alert_id + AND contact_method_id = acm.contact_method_id + AND sent_at IS NOT NULL + ); + + +CREATE TABLE user_contact_method_locks ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + client_id UUID NOT NULL, + alert_id BIGINT NOT NULL REFERENCES alerts (id) ON DELETE CASCADE, + contact_method_id UUID NOT NULL REFERENCES user_contact_methods (id) ON DELETE CASCADE, + timestamp TIMESTAMP NOT NULL DEFAULT now(), + UNIQUE (alert_id, contact_method_id) +); + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION aquire_user_contact_method_lock(_client_id UUID, _alert_id BIGINT, _contact_method_id UUID) RETURNS UUID AS + $$ + DECLARE + lock_id UUID = gen_random_uuid(); + BEGIN + DELETE FROM user_contact_method_locks WHERE alert_id = _alert_id + AND contact_method_id = _contact_method_id + AND (timestamp + '5 minutes'::interval) < now(); + + INSERT INTO user_contact_method_locks (id, alert_id, contact_method_id, client_id) + VALUES (lock_id, _alert_id, _contact_method_id, _client_id) + RETURNING id INTO lock_id; + + INSERT INTO sent_notifications (id, alert_id, contact_method_id) VALUES (lock_id, _alert_id, _contact_method_id); + + RETURN lock_id; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION release_user_contact_method_lock(_client_id UUID, _id UUID, success BOOLEAN) RETURNS VOID AS + $$ + BEGIN + DELETE FROM user_contact_method_locks WHERE id = _id AND client_id = _client_id; + IF success + THEN + UPDATE sent_notifications SET sent_at = now() WHERE id = _id; + ELSE + DELETE FROM sent_notifications WHERE id = _id; + END IF; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +-- +migrate Down + +DROP FUNCTION update_notifications(); +DROP VIEW active_contact_methods; +DROP VIEW triggered_alert_users; +DROP TABLE sent_notifications; +DROP TABLE notifications; 
diff --git a/migrate/migrations/20170518142432-alert-assignments.sql b/migrate/migrations/20170518142432-alert-assignments.sql new file mode 100644 index 0000000000..99411b39a1 --- /dev/null +++ b/migrate/migrations/20170518142432-alert-assignments.sql @@ -0,0 +1,12 @@ + +-- +migrate Up + +CREATE TABLE alert_assignments ( + user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE, + alert_id BIGINT NOT NULL REFERENCES alerts (id) ON DELETE CASCADE, + PRIMARY KEY (user_id, alert_id) +); + +-- +migrate Down + +DROP TABLE alert_assignments; diff --git a/migrate/migrations/20170530135027-schedule-rotation.sql b/migrate/migrations/20170530135027-schedule-rotation.sql new file mode 100644 index 0000000000..fd59b9658b --- /dev/null +++ b/migrate/migrations/20170530135027-schedule-rotation.sql @@ -0,0 +1,349 @@ + +-- +migrate Up +CREATE TABLE schedules ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + name TEXT NOT NULL UNIQUE, + description TEXT NOT NULL DEFAULT '', + time_zone TEXT NOT NULL +); + +INSERT INTO schedules (id, name, description, time_zone) + SELECT id::UUID, name, description, 'America/Chicago' FROM schedule; + +CREATE TYPE enum_rotation_type AS ENUM ( + 'weekly', + 'daily', + 'hourly' +); + +CREATE TABLE rotations ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + schedule_id UUID NOT NULL REFERENCES schedules (id) ON DELETE CASCADE, + name TEXT NOT NULL, + description TEXT NOT NULL DEFAULT '', + type enum_rotation_type NOT NULL, + start_time TIMESTAMPTZ NOT NULL DEFAULT now(), + shift_length BIGINT NOT NULL DEFAULT 1, + UNIQUE (schedule_id, name) +); + +INSERT INTO rotations (id, start_time, name, type, description, shift_length, schedule_id) + SELECT id::UUID, effective_date, name, rotation_type::enum_rotation_type, description, shift_length, schedule_id::UUID FROM schedule_layer; + +CREATE TABLE rotation_participants ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + rotation_id UUID NOT NULL REFERENCES rotations (id) ON DELETE CASCADE, + 
position INT NOT NULL, + user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE, + UNIQUE (rotation_id, position) DEFERRABLE INITIALLY DEFERRED +); + +INSERT INTO rotation_participants (rotation_id, position, user_id) + SELECT schedule_layer_id::UUID, step_number-1, user_id FROM schedule_layer_user; + + +-- Update escalation_policy_actions references +ALTER TABLE escalation_policy_actions RENAME COLUMN schedule_id TO old_schedule_id; +ALTER TABLE escalation_policy_actions ADD COLUMN schedule_id UUID REFERENCES schedules (id) ON DELETE CASCADE; +UPDATE escalation_policy_actions SET schedule_id = old_schedule_id::UUID WHERE old_schedule_id IS NOT NULL; +ALTER TABLE escalation_policy_actions DROP COLUMN old_schedule_id; +ALTER TABLE escalation_policy_actions ADD UNIQUE(escalation_policy_step_id, schedule_id, user_id); +ALTER TABLE escalation_policy_actions ADD CHECK((schedule_id IS NOT NULL AND user_id IS NULL) OR (user_id IS NOT NULL AND schedule_id IS NULL)); + +DROP TABLE schedule_layer_user; +DROP TABLE schedule_layer; +DROP TABLE schedule; + + +CREATE VIEW on_call AS + WITH rotation_details AS ( + SELECT + id, + schedule_id, + start_time, + + (shift_length::TEXT||CASE + WHEN type='hourly'::enum_rotation_type THEN ' hours' + WHEN type='daily'::enum_rotation_type THEN ' days' + ELSE ' weeks' + END)::interval shift, + + (CASE + WHEN type='hourly'::enum_rotation_type THEN extract(epoch from now()-start_time)/3600 + WHEN type='daily'::enum_rotation_type THEN extract(days from now()-start_time) + ELSE extract(days from now()-start_time)/7 + END/shift_length)::BIGINT shift_number + + FROM rotations + ), p_count AS ( + SELECT count(rp.id) + FROM + rotation_participants rp, + rotation_details d + WHERE rp.rotation_id = d.id + ), + current_participant AS ( + SELECT user_id + FROM + rotation_participants rp, + rotation_details d, + p_count p + WHERE rp.rotation_id = d.id + AND rp.position = d.shift_number % p.count + LIMIT 1 + ), + next_participant AS ( + SELECT 
user_id + FROM + rotation_participants rp, + rotation_details d, + p_count p + WHERE rp.rotation_id = d.id + AND rp.position = (d.shift_number+1) % p.count + LIMIT 1 + ) + SELECT + d.schedule_id, + d.id rotation_id, + c.user_id, + n.user_id next_user_id, + (d.shift*d.shift_number)+d.start_time start_time, + (d.shift*(d.shift_number+1))+d.start_time end_time, + d.shift_number + FROM + rotation_details d, + current_participant c, + next_participant n; + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION move_rotation_position(_id UUID, _new_pos INT) RETURNS VOID AS + $$ + DECLARE + _old_pos INT; + _rid UUID; + BEGIN + SELECT position,rotation_id into _old_pos, _rid FROM rotation_participants WHERE id = _id; + IF _old_pos > _new_pos THEN + UPDATE rotation_participants SET position = position + 1 WHERE rotation_id = _rid AND position < _old_pos AND position >= _new_pos; + ELSE + UPDATE rotation_participants SET position = position - 1 WHERE rotation_id = _rid AND position > _old_pos AND position <= _new_pos; + END IF; + UPDATE rotation_participants SET position = _new_pos WHERE id = _id; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION remove_rotation_participant(_id UUID) RETURNS UUID AS + $$ + DECLARE + _old_pos INT; + _rid UUID; + BEGIN + SELECT position,rotation_id into _old_pos, _rid FROM rotation_participants WHERE id = _id; + DELETE FROM rotation_participants WHERE id = _id; + UPDATE rotation_participants SET position = position - 1 WHERE rotation_id = _rid AND position > _old_pos; + RETURN _rid; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +CREATE OR REPLACE VIEW on_call_alert_users AS + WITH alert_users AS ( + SELECT act.user_id, act.schedule_id, a.id as alert_id, a.status + FROM + alerts a, + service s, + alert_escalation_levels lvl, + escalation_policy_step step, + escalation_policy_actions act + WHERE s.id = a.service_id + AND step.escalation_policy_id = 
s.escalation_policy_id + AND step.step_number = lvl.relative_level + AND a.status != 'closed'::enum_alert_status + AND act.escalation_policy_step_id = step.id + GROUP BY user_id, schedule_id, a.id + ) + + SELECT + au.alert_id, + au.status, + CASE WHEN au.user_id IS NULL THEN oc.user_id + ELSE au.user_id + END + FROM alert_users au, on_call oc + WHERE oc.schedule_id = au.schedule_id OR au.schedule_id IS NULL; + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION update_notifications() RETURNS VOID AS + $$ + BEGIN + INSERT INTO notifications (user_id) + SELECT user_id FROM on_call_alert_users + WHERE status = 'triggered'::enum_alert_status + GROUP BY user_id + ON CONFLICT DO NOTHING; + + DELETE FROM notifications WHERE user_id NOT IN (SELECT user_id FROM on_call_alert_users WHERE status = 'triggered'::enum_alert_status AND user_id = user_id); + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION add_notifications() RETURNS TRIGGER AS + $$ + BEGIN + INSERT INTO notifications (user_id) + SELECT user_id FROM on_call_alert_users + WHERE alert_id = NEW.id AND status = 'triggered'::enum_alert_status + LIMIT 1 + ON CONFLICT DO NOTHING; + + DELETE FROM notifications WHERE user_id NOT IN (SELECT user_id FROM on_call_alert_users WHERE status = 'triggered'::enum_alert_status AND user_id = user_id); + RETURN NEW; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE OR REPLACE VIEW needs_notification_sent AS + SELECT trig.alert_id, acm.contact_method_id, cm.type, cm.value, a.description, s.name as service_name + FROM active_contact_methods acm, on_call_alert_users trig, user_contact_methods cm, alerts a, service s + WHERE acm.user_id = trig.user_id + AND acm.user_id = trig.user_id + AND cm.id = acm.contact_method_id + AND cm.disabled = FALSE + AND a.id = trig.alert_id + AND trig.status = 'triggered'::enum_alert_status + AND s.id = a.service_id + AND NOT EXISTS ( + SELECT id + FROM 
sent_notifications + WHERE alert_id = trig.alert_id + AND contact_method_id = acm.contact_method_id + AND sent_at IS NOT NULL + ); + +DROP VIEW triggered_alert_users; + + + +CREATE OR REPLACE VIEW on_call_next_rotation AS +WITH + p_count AS ( + SELECT rotation_id, count(rp.position) + FROM + rotations r, + rotation_participants rp + WHERE r.id = rp.rotation_id + GROUP BY rotation_id + ) +SELECT + oc.schedule_id, + rp.rotation_id, + rp.user_id, + oc.next_user_id, + + ( + CASE WHEN oc.shift_number % p.count < rp.position + THEN rp.position-(oc.shift_number % p.count) + ELSE rp.position-(oc.shift_number % p.count)+p.count + END + ) * (oc.end_time-oc.start_time) + oc.start_time start_time, + + ( + CASE WHEN oc.shift_number % p.count < rp.position + THEN rp.position-(oc.shift_number % p.count) + ELSE rp.position-(oc.shift_number % p.count)+p.count + END + ) * (oc.end_time-oc.start_time) + oc.end_time end_time, + + ( + CASE WHEN oc.shift_number % p.count < rp.position + THEN rp.position-(oc.shift_number % p.count) + ELSE rp.position-(oc.shift_number % p.count)+p.count + END + ) + oc.shift_number shift_number + +FROM + rotations r, + rotation_participants rp, + p_count p, + on_call oc +WHERE p.rotation_id = r.id + AND rp.rotation_id = r.id + AND oc.rotation_id = r.id +GROUP BY + rp.user_id, + rp.rotation_id, + oc.shift_number, + p.count, + shift_length, + type, + oc.start_time, + oc.end_time, + rp.position, + oc.schedule_id, + oc.next_user_id; + + +-- +migrate Down + +CREATE TABLE schedule ( + id TEXT PRIMARY KEY, + created_at TIMESTAMP DEFAULT now(), + name TEXT, + description TEXT, + time_zone INT +); + +INSERT INTO schedule (id, name, description, time_zone) + SELECT s.id::TEXT, s.name, s.description, date_part('hour', tz.utc_offset) + FROM schedules s, pg_timezone_names tz + WHERE tz.name = s.time_zone; + +CREATE TABLE schedule_layer ( + id UUID PRIMARY KEY, + created_at TIMESTAMP DEFAULT now(), + effective_date TIMESTAMP, + description TEXT, + handoff_day INT, + 
handoff_time TEXT, + name TEXT, + rotation_type TEXT, + shift_length INT, + shift_length_unit TEXT, + schedule_id TEXT REFERENCES schedule (id) +); + +INSERT INTO schedule_layer (id, effective_date, description, handoff_day, handoff_time, name, rotation_type, shift_length, shift_length_unit, schedule_id) + SELECT id, start_time, description, EXTRACT(DOW FROM start_time), date_part('hour', start_time)::TEXT||':'||date_part('minute', start_time)::TEXT, name, type::TEXT, shift_length, 'hour', schedule_id::TEXT + FROM rotations; + +CREATE TABLE schedule_layer_user ( + id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::TEXT, + created_at TIMESTAMP DEFAULT now(), + step_number INT, + user_id UUID REFERENCES users (id), + schedule_layer_id TEXT REFERENCES schedule_layer (id) +); + +INSERT INTO schedule_layer_user (step_number, user_id, schedule_layer_id) + SELECT position+1, user_id, rotation_id::TEXT FROM rotation_participants; + + +ALTER TABLE escalation_policy_actions RENAME COLUMN schedule_id TO old_schedule_id; +ALTER TABLE escalation_policy_actions ADD COLUMN schedule_id TEXT REFERENCES schedule (id) ON DELETE CASCADE; +UPDATE escalation_policy_actions SET schedule_id = old_schedule_id::TEXT WHERE old_schedule_id IS NOT NULL; +ALTER TABLE escalation_policy_actions DROP COLUMN old_schedule_id; +ALTER TABLE escalation_policy_actions ADD UNIQUE(escalation_policy_step_id, schedule_id, user_id); +ALTER TABLE escalation_policy_actions ADD CHECK((schedule_id IS NOT NULL AND user_id IS NULL) OR (user_id IS NOT NULL AND schedule_id IS NULL)); + +DROP TABLE IF EXISTS shift_creation_locks; +DROP VIEW on_call CASCADE; +DROP TABLE rotation_participants; +DROP TABLE rotations; +DROP TYPE enum_rotation_type; +DROP TABLE schedules; diff --git a/migrate/migrations/20170605131920-twilio-sms.sql b/migrate/migrations/20170605131920-twilio-sms.sql new file mode 100644 index 0000000000..06f994ea52 --- /dev/null +++ b/migrate/migrations/20170605131920-twilio-sms.sql @@ -0,0 +1,15 @@ + +-- +migrate Up +CREATE TABLE twilio_sms_callbacks( + phone_number TEXT 
NOT NULL, -- the phone number to be notified + callback_id UUID NOT NULL REFERENCES sent_notifications (id) ON DELETE CASCADE, -- the unique ID of the notification + code INT NOT NULl, -- the alert number with which user should respond to for this alert + twilio_sid TEXT NOT NULL, -- the unique Twilio sid returned from Twilio (returned when alert message not delivered by Twilio) + PRIMARY KEY(phone_number,code), + UNIQUE (phone_number,twilio_sid) +); + + +-- +migrate Down +DROP TABLE IF EXISTS twilio_sms_callbacks; + diff --git a/migrate/migrations/20170605131942-twilio-voice.sql b/migrate/migrations/20170605131942-twilio-voice.sql new file mode 100644 index 0000000000..f13700ab64 --- /dev/null +++ b/migrate/migrations/20170605131942-twilio-voice.sql @@ -0,0 +1,13 @@ + +-- +migrate Up +CREATE TABLE twilio_voice_callbacks( + phone_number TEXT NOT NULL, -- the phone number to which call was made + callback_id UUID NOT NULL REFERENCES sent_notifications (id) ON DELETE CASCADE, -- the unique ID of the notification + code INT NOT NULL, -- the alert number + description TEXT NOT NULL DEFAULT '', -- the alert description + twilio_sid TEXT NOT NULL, -- the unique Twilio sid for the call returned from Twilio (returned when call not delivered by Twilio) + PRIMARY KEY (phone_number,twilio_sid) +); +-- +migrate Down +DROP TABLE IF EXISTS twilio_voice_callbacks; + diff --git a/migrate/migrations/20170607103917-throttle.sql b/migrate/migrations/20170607103917-throttle.sql new file mode 100644 index 0000000000..ca069d6b0a --- /dev/null +++ b/migrate/migrations/20170607103917-throttle.sql @@ -0,0 +1,19 @@ + +-- +migrate Up + +CREATE TYPE enum_throttle_type as ENUM ( + 'notifications' +); + +CREATE TABLE throttle ( + action enum_throttle_type PRIMARY KEY, + client_id UUID, + last_action_time TIMESTAMP NOT NULL DEFAULT now() +); + +INSERT INTO throttle (action) VALUES ('notifications'); + +-- +migrate Down + +DROP TABLE throttle; +DROP TYPE enum_throttle_type; diff --git 
a/migrate/migrations/20170612101232-escalation-tweaks.sql b/migrate/migrations/20170612101232-escalation-tweaks.sql new file mode 100644 index 0000000000..006fdaa9f3 --- /dev/null +++ b/migrate/migrations/20170612101232-escalation-tweaks.sql @@ -0,0 +1,51 @@ + +-- +migrate Up + +ALTER TABLE escalation_policy_step + ALTER COLUMN id SET DEFAULT gen_random_uuid()::TEXT; +ALTER TABLE escalation_policy_step + ALTER COLUMN delay SET NOT NULL; +ALTER TABLE escalation_policy_step + ALTER COLUMN delay SET DEFAULT 1; + +ALTER TABLE escalation_policy_step ADD UNIQUE(step_number, escalation_policy_id); + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION set_escalation_policy_step() RETURNS TRIGGER AS + $$ + BEGIN + SELECT count(step_number)+1 INTO NEW.step_number FROM escalation_policy_step WHERE escalation_policy_id = NEW.escalation_policy_id; + RETURN NEW; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION incr_escalation_policy_steps_on_delete() RETURNS TRIGGER AS + $$ + BEGIN + UPDATE escalation_policy_step + SET step_number = step_number-1 + WHERE escalation_policy_id = OLD.escalation_policy_id + AND step_number > OLD.step_number; + + RETURN OLD; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER incr_escalation_policy_steps_on_delete + AFTER DELETE ON escalation_policy_step + FOR EACH ROW + EXECUTE PROCEDURE incr_escalation_policy_steps_on_delete(); + + +CREATE TRIGGER set_escalation_policy_step_on_insert + BEFORE INSERT ON escalation_policy_step + FOR EACH ROW + EXECUTE PROCEDURE set_escalation_policy_step(); + +-- +migrate Down + +DROP FUNCTION set_escalation_policy_step(); diff --git a/migrate/migrations/20170613122551-auth-token.sql b/migrate/migrations/20170613122551-auth-token.sql new file mode 100644 index 0000000000..dda1ac055d --- /dev/null +++ b/migrate/migrations/20170613122551-auth-token.sql @@ -0,0 +1,13 @@ + +-- +migrate Up + +CREATE TABLE auth_token_codes ( + 
id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + user_id UUID UNIQUE NOT NULL REFERENCES users (id), + expires_at TIMESTAMP NOT NULL DEFAULT now()+'5 minutes'::INTERVAL, + user_agent TEXT NOT NULL +); + +-- +migrate Down + +DROP TABLE auth_token_codes; diff --git a/migrate/migrations/20170619123628-add-constraints.sql b/migrate/migrations/20170619123628-add-constraints.sql new file mode 100644 index 0000000000..33b6ba1620 --- /dev/null +++ b/migrate/migrations/20170619123628-add-constraints.sql @@ -0,0 +1,9 @@ + +-- +migrate Up +ALTER TABLE escalation_policy ADD UNIQUE(name); +ALTER TABLE service ADD UNIQUE(name); + + +-- +migrate Down +ALTER TABLE escalation_policy DROP CONSTRAINT escalation_policy_name_key; +ALTER TABLE service DROP CONSTRAINT service_name_key; diff --git a/migrate/migrations/20170619164449-bobby-tables.sql b/migrate/migrations/20170619164449-bobby-tables.sql new file mode 100644 index 0000000000..50fed0ec16 --- /dev/null +++ b/migrate/migrations/20170619164449-bobby-tables.sql @@ -0,0 +1,8 @@ + +-- +migrate Up +DROP TABLE alert, incident, incident_assignment, incident_log, maintenance, service_maintenance; + +-- +migrate Down + +SELECT 1; + diff --git a/migrate/migrations/20170620104459-contact-constraints.sql b/migrate/migrations/20170620104459-contact-constraints.sql new file mode 100644 index 0000000000..1022da450e --- /dev/null +++ b/migrate/migrations/20170620104459-contact-constraints.sql @@ -0,0 +1,12 @@ + +-- +migrate Up + +ALTER TABLE user_contact_methods ADD UNIQUE(name, type, user_id); +ALTER TABLE user_contact_methods ADD UNIQUE(type, value); +ALTER TABLE user_contact_methods ALTER id SET DEFAULT gen_random_uuid(); + +-- +migrate Down + +ALTER TABLE user_contact_methods DROP CONSTRAINT user_contact_methods_name_type_user_id_key; +ALTER TABLE user_contact_methods DROP CONSTRAINT user_contact_methods_type_value_key; +ALTER TABLE user_contact_methods ALTER id DROP DEFAULT; diff --git a/migrate/migrations/20170621141923-notification-query-fixes.sql 
b/migrate/migrations/20170621141923-notification-query-fixes.sql new file mode 100644 index 0000000000..ff9ead2a53 --- /dev/null +++ b/migrate/migrations/20170621141923-notification-query-fixes.sql @@ -0,0 +1,99 @@ + +-- +migrate Up + +CREATE OR REPLACE VIEW on_call_alert_users AS + WITH alert_users AS ( + SELECT act.user_id, act.schedule_id, a.id as alert_id, a.status + FROM + alerts a, + service s, + alert_escalation_levels lvl, + escalation_policy_step step, + escalation_policy_actions act + WHERE s.id = a.service_id + AND step.escalation_policy_id = s.escalation_policy_id + AND step.step_number = lvl.relative_level + AND a.status != 'closed'::enum_alert_status + AND act.escalation_policy_step_id = step.id + GROUP BY user_id, schedule_id, a.id + ) + + SELECT + au.alert_id, + au.status, + CASE WHEN au.user_id IS NULL THEN oc.user_id + ELSE au.user_id + END + FROM alert_users au + LEFT JOIN on_call oc ON au.schedule_id = oc.schedule_id; + + +CREATE OR REPLACE VIEW on_call AS + WITH rotation_details AS ( + SELECT + id, + schedule_id, + start_time, + + (shift_length::TEXT||CASE + WHEN type='hourly'::enum_rotation_type THEN ' hours' + WHEN type='daily'::enum_rotation_type THEN ' days' + ELSE ' weeks' + END)::interval shift, + + (CASE + WHEN type='hourly'::enum_rotation_type THEN extract(epoch from now()-start_time)/3600 + WHEN type='daily'::enum_rotation_type THEN extract(days from now()-start_time) + ELSE extract(days from now()-start_time)/7 + END/shift_length)::BIGINT shift_number + + FROM rotations + ), + p_count AS ( + SELECT rotation_id, count(rp.id) + FROM + rotation_participants rp, + rotation_details d + WHERE rp.rotation_id = d.id + GROUP BY rotation_id + ), + current_participant AS ( + SELECT user_id, p.rotation_id + FROM + rotation_participants rp, + rotation_details d, + p_count p + WHERE rp.rotation_id = d.id + AND p.rotation_id = rp.rotation_id + AND rp.position = d.shift_number % p.count + ), + next_participant AS ( + SELECT user_id, p.rotation_id + 
FROM + rotation_participants rp, + rotation_details d, + p_count p + WHERE rp.rotation_id = d.id + AND p.rotation_id = rp.rotation_id + AND rp.position = (d.shift_number+1) % p.count + ) + SELECT + d.schedule_id, + d.id rotation_id, + c.user_id, + n.user_id next_user_id, + (d.shift*d.shift_number)+d.start_time start_time, + (d.shift*(d.shift_number+1))+d.start_time end_time, + d.shift_number + FROM + rotation_details d, + current_participant c, + next_participant n + WHERE d.id = c.rotation_id + AND c.rotation_id = n.rotation_id; + + + +-- +migrate Down + +SELECT 1; diff --git a/migrate/migrations/20170621170744-add-country-code.sql b/migrate/migrations/20170621170744-add-country-code.sql new file mode 100644 index 0000000000..bf25d48004 --- /dev/null +++ b/migrate/migrations/20170621170744-add-country-code.sql @@ -0,0 +1,7 @@ + +-- +migrate Up +UPDATE user_contact_methods SET value = '+'||value WHERE (type = 'SMS' OR type = 'VOICE') AND value NOT LIKE '+%'; + +-- +migrate Down + +UPDATE user_contact_methods SET value = substring(value from 2) WHERE (type = 'SMS' OR type = 'VOICE') AND value LIKE '+%'; diff --git a/migrate/migrations/20170623151348-on-call-alert-distinct.sql b/migrate/migrations/20170623151348-on-call-alert-distinct.sql new file mode 100644 index 0000000000..f92897cbe4 --- /dev/null +++ b/migrate/migrations/20170623151348-on-call-alert-distinct.sql @@ -0,0 +1,31 @@ + +-- +migrate Up +CREATE OR REPLACE VIEW on_call_alert_users AS + WITH alert_users AS ( + SELECT act.user_id, act.schedule_id, a.id as alert_id, a.status + FROM + alerts a, + service s, + alert_escalation_levels lvl, + escalation_policy_step step, + escalation_policy_actions act + WHERE s.id = a.service_id + AND step.escalation_policy_id = s.escalation_policy_id + AND step.step_number = lvl.relative_level + AND a.status != 'closed'::enum_alert_status + AND act.escalation_policy_step_id = step.id + GROUP BY user_id, schedule_id, a.id + ) + + SELECT DISTINCT + au.alert_id, + au.status, + 
CASE WHEN au.user_id IS NULL THEN oc.user_id + ELSE au.user_id + END + FROM alert_users au + LEFT JOIN on_call oc ON au.schedule_id = oc.schedule_id; + +-- +migrate Down + +SELECT 1; diff --git a/migrate/migrations/20170623155346-delete-keys-with-service.sql b/migrate/migrations/20170623155346-delete-keys-with-service.sql new file mode 100644 index 0000000000..3b2280a4f2 --- /dev/null +++ b/migrate/migrations/20170623155346-delete-keys-with-service.sql @@ -0,0 +1,15 @@ + +-- +migrate Up +ALTER TABLE integration +DROP CONSTRAINT integration_service_id_fkey, +ADD CONSTRAINT integration_service_id_fkey + FOREIGN KEY (service_id) + REFERENCES service(id) + ON DELETE CASCADE; + +-- +migrate Down +ALTER TABLE integration +DROP CONSTRAINT integration_service_id_fkey, +ADD CONSTRAINT integration_service_id_fkey + FOREIGN KEY (service_id) + REFERENCES service(id); diff --git a/migrate/migrations/20170629104138-escalation-policy-tweak.sql b/migrate/migrations/20170629104138-escalation-policy-tweak.sql new file mode 100644 index 0000000000..4c1b63b818 --- /dev/null +++ b/migrate/migrations/20170629104138-escalation-policy-tweak.sql @@ -0,0 +1,6 @@ + +-- +migrate Up +ALTER TABLE escalation_policy + ALTER COLUMN id SET DEFAULT gen_random_uuid()::TEXT; + +-- +migrate Down diff --git a/migrate/migrations/20170630095448-integration-to-integration-keys.sql b/migrate/migrations/20170630095448-integration-to-integration-keys.sql new file mode 100644 index 0000000000..de8d8e25f6 --- /dev/null +++ b/migrate/migrations/20170630095448-integration-to-integration-keys.sql @@ -0,0 +1,27 @@ + +-- +migrate Up + +CREATE TYPE enum_integration_keys_type as ENUM ( + 'grafana' +); + + +CREATE TABLE integration_keys ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + name TEXT NOT NULL, + type enum_integration_keys_type NOT NULL, + service_id TEXT NOT NULL REFERENCES service(id) ON DELETE CASCADE, + UNIQUE (name, service_id) +); + + +INSERT INTO integration_keys(name, type, service_id) +SELECT 
name, 'grafana', service_id +FROM integration; + + +-- +migrate Down + + +DROP TABLE IF EXISTS integration_keys; +DROP TYPE IF EXISTS enum_integration_keys_type; diff --git a/migrate/migrations/20170706102439-esc-zero-index.sql b/migrate/migrations/20170706102439-esc-zero-index.sql new file mode 100644 index 0000000000..b5e21f459c --- /dev/null +++ b/migrate/migrations/20170706102439-esc-zero-index.sql @@ -0,0 +1,51 @@ + +-- +migrate Up + +UPDATE escalation_policy_step SET step_number = step_number - 1; + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION escalate_alerts() RETURNS void + LANGUAGE plpgsql + AS $$ + BEGIN + UPDATE alerts a + SET escalation_level = escalation_level + 1, last_escalation = now() + FROM service s, escalation_policy_step step, alert_escalation_levels lvl, escalation_policy e + WHERE (last_escalation + (step.delay::TEXT||' minutes')::interval) < now() + AND a.status = 'triggered'::enum_alert_status + AND s.id = a.service_id + AND step.escalation_policy_id = s.escalation_policy_id + AND lvl.alert_id = a.id + AND step.step_number = (a.escalation_level % lvl.levels) + AND e.id = s.escalation_policy_id + AND (e.repeat = -1 OR escalation_level / lvl.levels < e.repeat); + END; + $$; +-- +migrate StatementEnd + +-- +migrate StatementBegin + +CREATE OR REPLACE FUNCTION set_escalation_policy_step() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + SELECT count(step_number) INTO NEW.step_number FROM escalation_policy_step WHERE escalation_policy_id = NEW.escalation_policy_id; + RETURN NEW; + END; + $$; +-- +migrate StatementEnd + +CREATE OR REPLACE VIEW alert_escalation_levels AS + SELECT alerts.id AS alert_id, + count(step.id) AS levels, + (alerts.escalation_level % count(step.id)) as relative_level + FROM alerts,escalation_policy_step step,service + WHERE step.escalation_policy_id = service.escalation_policy_id + AND service.id = alerts.service_id + GROUP BY alerts.id; + + +SELECT update_notifications(); + +-- +migrate Down diff --git 
a/migrate/migrations/20170707135355-esc-cascade-steps-actions.sql b/migrate/migrations/20170707135355-esc-cascade-steps-actions.sql new file mode 100644 index 0000000000..5d0a055906 --- /dev/null +++ b/migrate/migrations/20170707135355-esc-cascade-steps-actions.sql @@ -0,0 +1,12 @@ + +-- +migrate Up + + +ALTER TABLE escalation_policy_step +DROP CONSTRAINT escalation_policy_step_escalation_policy_id_fkey, +ADD CONSTRAINT escalation_policy_step_escalation_policy_id_fkey + FOREIGN KEY (escalation_policy_id) + REFERENCES escalation_policy(id) + ON DELETE CASCADE; + +-- +migrate Down diff --git a/migrate/migrations/20170707153545-limit-cm-per-interval.sql b/migrate/migrations/20170707153545-limit-cm-per-interval.sql new file mode 100644 index 0000000000..dec2863a60 --- /dev/null +++ b/migrate/migrations/20170707153545-limit-cm-per-interval.sql @@ -0,0 +1,5 @@ + +-- +migrate Up +ALTER TABLE user_notification_rules ADD UNIQUE(contact_method_id, delay_minutes); + +-- +migrate Down diff --git a/migrate/migrations/20170710155447-fix-escalations.sql b/migrate/migrations/20170710155447-fix-escalations.sql new file mode 100644 index 0000000000..abb4f3bd45 --- /dev/null +++ b/migrate/migrations/20170710155447-fix-escalations.sql @@ -0,0 +1,24 @@ + +-- +migrate Up + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION escalate_alerts() RETURNS void + LANGUAGE plpgsql + AS $$ + BEGIN + UPDATE alerts a + SET escalation_level = escalation_level + 1, last_escalation = now() + FROM service s, escalation_policy_step step, alert_escalation_levels lvl, escalation_policy e + WHERE (last_escalation + (step.delay::TEXT||' minutes')::interval) < now() + AND a.status = 'triggered'::enum_alert_status + AND s.id = a.service_id + AND step.escalation_policy_id = s.escalation_policy_id + AND lvl.alert_id = a.id + AND step.step_number = (a.escalation_level % lvl.levels) + AND e.id = s.escalation_policy_id + AND (e.repeat = -1 OR (escalation_level+1) / lvl.levels <= e.repeat); + END; + $$; +-- 
+migrate StatementEnd + +-- +migrate Down diff --git a/migrate/migrations/20170712094434-notification-policy-updates.sql b/migrate/migrations/20170712094434-notification-policy-updates.sql new file mode 100644 index 0000000000..acd8e637f0 --- /dev/null +++ b/migrate/migrations/20170712094434-notification-policy-updates.sql @@ -0,0 +1,423 @@ + +-- +migrate Up + +-- Update to return alert escalation_level +CREATE OR REPLACE VIEW on_call_alert_users AS + WITH alert_users AS ( + SELECT act.user_id, act.schedule_id, a.id as alert_id, a.status, a.escalation_level + FROM + alerts a, + service s, + alert_escalation_levels lvl, + escalation_policy_step step, + escalation_policy_actions act + WHERE s.id = a.service_id + AND lvl.alert_id = a.id + AND step.escalation_policy_id = s.escalation_policy_id + AND step.step_number = lvl.relative_level + AND a.status != 'closed'::enum_alert_status + AND act.escalation_policy_step_id = step.id + GROUP BY user_id, schedule_id, a.id + ) + + SELECT DISTINCT + au.alert_id, + au.status, + CASE WHEN au.user_id IS NULL THEN oc.user_id + ELSE au.user_id + END, + au.escalation_level + FROM alert_users au + LEFT JOIN on_call oc ON au.schedule_id = oc.schedule_id; + +-- new notification tracking table +CREATE TABLE user_notification_cycles ( + id UUID NOT NULL UNIQUE DEFAULT gen_random_uuid(), + user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE, + alert_id BIGINT NOT NULL REFERENCES alerts (id) ON DELETE CASCADE, + escalation_level INT NOT NULL, + started_at TIMESTAMP NOT NULL DEFAULT now(), + PRIMARY KEY (user_id, alert_id) +); + +-- Add new throttle type, so new notification cycle can run during deployment +ALTER TYPE enum_throttle_type RENAME TO enum_throttle_type_old; +CREATE TYPE enum_throttle_type AS ENUM ( + 'notifications', + 'notifications_2' +); +ALTER TABLE throttle ALTER COLUMN action TYPE enum_throttle_type USING action::TEXT::enum_throttle_type; +DROP TYPE enum_throttle_type_old; + + +DROP TRIGGER 
add_notifications_alert_changed ON alerts; +DROP FUNCTION add_notifications(); +DROP FUNCTION update_notifications(); + + +ALTER TABLE twilio_sms_callbacks DROP CONSTRAINT twilio_sms_callbacks_callback_id_fkey; +ALTER TABLE twilio_voice_callbacks DROP CONSTRAINT twilio_voice_callbacks_callback_id_fkey; +ALTER TABLE sent_notifications DROP CONSTRAINT sent_notifications_alert_id_contact_method_id_key; +ALTER TABLE sent_notifications DROP CONSTRAINT sent_notifications_pkey; +CREATE INDEX ON sent_notifications (id); + +ALTER TABLE sent_notifications ADD COLUMN cycle_id UUID; +ALTER TABLE sent_notifications ADD COLUMN notification_rule_id UUID REFERENCES user_notification_rules (id) ON DELETE CASCADE; + +DELETE FROM sent_notifications s +WHERE NOT EXISTS + ( + SELECT 1 FROM user_notification_rules r + WHERE s.contact_method_id = r.contact_method_id + ); + + +WITH sent_users AS ( + SELECT DISTINCT alert_id, user_id + FROM + sent_notifications s, + user_contact_methods c + WHERE c.id = s.contact_method_id + ), + cycles AS ( + SELECT alert_id, user_id, gen_random_uuid() as cycle_id + FROM sent_users + ) +UPDATE sent_notifications n +SET cycle_id = c.cycle_id +FROM + cycles c, + user_contact_methods m +WHERE n.cycle_id IS NULL + AND m.id = n.contact_method_id + AND m.user_id = c.user_id + AND n.alert_id = c.alert_id +; + +-- +migrate StatementBegin +DO +$do$ +BEGIN + IF EXISTS (SELECT 1 FROM sent_notifications WHERE sent_at IS NULL) THEN + RAISE EXCEPTION 'found in-flight notifications (sent_at was NULL)'; + END IF; + IF EXISTS (SELECT 1 FROM user_contact_method_locks) THEN + RAISE EXCEPTION 'found active contact method locks'; + END IF; +END +$do$ +-- +migrate StatementEnd +WITH sent_times AS + ( + SELECT s.alert_id, c.user_id, min(s.sent_at) AS sent_at + FROM + sent_notifications s, + user_contact_methods c + WHERE c.id = s.contact_method_id + GROUP BY s.alert_id, c.user_id + ), + start_times AS + ( + SELECT + s.alert_id, + s.user_id, + s.sent_at - 
(max(n.delay_minutes)::TEXT||' minutes')::INTERVAL AS sent_at + FROM + sent_times s, + user_notification_rules n + WHERE n.user_id = s.user_id + GROUP BY s.alert_id, s.user_id, s.sent_at + ) +INSERT INTO user_notification_cycles (id, user_id, alert_id, escalation_level, started_at) +SELECT DISTINCT + s.cycle_id, + c.user_id, + s.alert_id, + a.escalation_level, + t.sent_at +FROM + sent_notifications s, + alerts a, + start_times t, + user_contact_methods c +WHERE a.id = s.alert_id + AND c.id = s.contact_method_id + AND t.alert_id = s.alert_id + AND t.user_id = c.user_id +ORDER BY sent_at DESC +ON CONFLICT (user_id, alert_id) DO NOTHING; + +INSERT INTO sent_notifications + (id, alert_id, contact_method_id, sent_at, cycle_id, notification_rule_id) +SELECT s.id, s.alert_id, s.contact_method_id, s.sent_at, s.cycle_id, n.id +FROM + sent_notifications s, + user_notification_rules n +WHERE n.contact_method_id = s.contact_method_id; + +DELETE FROM sent_notifications WHERE notification_rule_id IS NULL; + +ALTER TABLE sent_notifications ALTER COLUMN cycle_id SET NOT NULL; +ALTER TABLE sent_notifications ALTER COLUMN notification_rule_id SET NOT NULL; +ALTER TABLE sent_notifications ADD UNIQUE(notification_rule_id, cycle_id); + +ALTER TABLE user_notification_rules ADD COLUMN created_at TIMESTAMP DEFAULT now(); + +CREATE OR REPLACE VIEW user_notification_cycle_state AS + SELECT DISTINCT + c.alert_id, + nr.id AS notification_rule_id, + nr.user_id, + c.id AS cycle_id, + (nr.delay_minutes::TEXT||' minutes')::INTERVAL > now()-c.started_at AS future, + nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id) AS pending, + c.escalation_level + FROM + user_notification_cycles c, + alerts a, + user_notification_rules nr + WHERE a.id = c.alert_id + AND a.status = 'triggered' + AND nr.user_id = c.user_id + AND nr.id NOT IN ( + SELECT notification_rule_id + FROM 
sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id); + +CREATE OR REPLACE VIEW needs_notification_sent AS + SELECT DISTINCT cs.alert_id, nr.contact_method_id, cm.type, cm.value, a.description, s.name as service_name, nr.id as notification_rule_id, cs.escalation_level, cs.cycle_id FROM + user_notification_cycle_state cs, + alerts a, + user_contact_methods cm, + user_notification_rules nr, + service s + WHERE a.id = cs.alert_id + AND a.status = 'triggered' + AND cs.escalation_level = a.escalation_level + AND cm.id = nr.contact_method_id + AND nr.id = cs.notification_rule_id + AND s.id = a.service_id + AND cs.pending + AND NOT cs.future; + +DROP VIEW active_contact_methods; +DROP TABLE notifications; + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION aquire_user_contact_method_lock(_client_id UUID, _alert_id BIGINT, _contact_method_id UUID) RETURNS UUID AS + $$ + DECLARE + lock_id UUID = gen_random_uuid(); + BEGIN + DELETE FROM user_contact_method_locks WHERE alert_id = _alert_id + AND contact_method_id = _contact_method_id + AND (timestamp + '5 minutes'::interval) < now(); + + INSERT INTO user_contact_method_locks (id, alert_id, contact_method_id, client_id) + VALUES (lock_id, _alert_id, _contact_method_id, _client_id) + RETURNING id INTO lock_id; + + INSERT INTO sent_notifications (id, alert_id, contact_method_id, cycle_id, notification_rule_id) + SELECT lock_id, _alert_id, _contact_method_id, cycle_id, notification_rule_id + FROM needs_notification_sent n + WHERE n.alert_id = _alert_id AND n.contact_method_id = _contact_method_id + ON CONFLICT DO NOTHING; + + RETURN lock_id; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION update_notification_cycles() RETURNS VOID AS + $$ + BEGIN + INSERT INTO user_notification_cycles (user_id, alert_id, escalation_level) + SELECT user_id, alert_id, escalation_level + FROM on_call_alert_users 
+ WHERE status = 'triggered' + ON CONFLICT DO NOTHING; + + UPDATE user_notification_cycles c + SET escalation_level = a.escalation_level + FROM + alerts a, + user_notification_cycle_state s + WHERE a.id = c.alert_id + AND s.user_id = c.user_id + AND s.alert_id = c.alert_id; + + DELETE FROM user_notification_cycles c + WHERE ( + SELECT count(notification_rule_id) + FROM user_notification_cycle_state s + WHERE s.alert_id = c.alert_id AND s.user_id = c.user_id + LIMIT 1 + ) = 0 + AND c.escalation_level != (SELECT escalation_level FROM alerts WHERE id = c.alert_id); + + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +ALTER TABLE user_notification_rules ALTER COLUMN id SET DEFAULT gen_random_uuid(); + + +-- +migrate Down + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION aquire_user_contact_method_lock(_client_id UUID, _alert_id BIGINT, _contact_method_id UUID) RETURNS UUID AS + $$ + DECLARE + lock_id UUID = gen_random_uuid(); + BEGIN + DELETE FROM user_contact_method_locks WHERE alert_id = _alert_id + AND contact_method_id = _contact_method_id + AND (timestamp + '5 minutes'::interval) < now(); + + INSERT INTO user_contact_method_locks (id, alert_id, contact_method_id, client_id) + VALUES (lock_id, _alert_id, _contact_method_id, _client_id) + RETURNING id INTO lock_id; + + INSERT INTO sent_notifications (id, alert_id, contact_method_id) VALUES (lock_id, _alert_id, _contact_method_id); + + RETURN lock_id; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TABLE notifications ( + user_id UUID PRIMARY KEY REFERENCES users (id) ON DELETE CASCADE, + started_at TIMESTAMP NOT NULL DEFAULT now() +); + +ALTER TYPE enum_throttle_type RENAME TO enum_throttle_type_old; +CREATE TYPE enum_throttle_type AS ENUM ( + 'notifications' +); + +ALTER TABLE throttle ALTER COLUMN action TYPE enum_throttle_type USING action::TEXT::enum_throttle_type; +DROP TYPE enum_throttle_type_old; + +CREATE VIEW active_contact_methods AS + SELECT users.id as user_id, 
m.id as contact_method_id + FROM users, user_contact_methods m, user_notification_rules r, notifications n + WHERE m.user_id = users.id + AND r.user_id = users.id + AND n.user_id = users.id + AND r.contact_method_id = m.id + AND ((r.delay_minutes::text||' minutes')::interval + n.started_at) < now(); + +DROP VIEW needs_notification_sent; + + + +DROP VIEW on_call_alert_users; +-- Old version +CREATE OR REPLACE VIEW on_call_alert_users AS +WITH alert_users AS ( + SELECT act.user_id, + act.schedule_id, + a.id AS alert_id, + a.status + FROM alerts a, + service s, + alert_escalation_levels lvl, + escalation_policy_step step, + escalation_policy_actions act + WHERE ((s.id = a.service_id) AND (step.escalation_policy_id = s.escalation_policy_id) AND (step.step_number = lvl.relative_level) AND (a.status <> 'closed'::enum_alert_status) AND (act.escalation_policy_step_id = step.id)) + GROUP BY act.user_id, act.schedule_id, a.id + ) + SELECT DISTINCT au.alert_id, + au.status, + CASE + WHEN (au.user_id IS NULL) THEN oc.user_id + ELSE au.user_id + END AS user_id + FROM (alert_users au + LEFT JOIN on_call oc ON ((au.schedule_id = oc.schedule_id))); + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION add_notifications() RETURNS TRIGGER AS + $$ + BEGIN + INSERT INTO notifications (user_id) + SELECT user_id FROM on_call_alert_users + WHERE alert_id = NEW.id AND status = 'triggered'::enum_alert_status + LIMIT 1 + ON CONFLICT DO NOTHING; + + DELETE FROM notifications WHERE user_id NOT IN (SELECT user_id FROM on_call_alert_users WHERE status = 'triggered'::enum_alert_status AND user_id = user_id); + RETURN NEW; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +DROP VIEW user_notification_cycle_state; +DROP TABLE user_notification_cycles; + +CREATE TRIGGER add_notifications_alert_changed + AFTER UPDATE OR INSERT ON alerts + FOR EACH ROW + EXECUTE PROCEDURE add_notifications(); + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION update_notifications() RETURNS VOID 
AS + $$ + BEGIN + INSERT INTO notifications (user_id) + SELECT user_id FROM on_call_alert_users + WHERE status = 'triggered'::enum_alert_status + GROUP BY user_id + ON CONFLICT DO NOTHING; + + DELETE FROM notifications WHERE user_id NOT IN (SELECT user_id FROM on_call_alert_users WHERE status = 'triggered'::enum_alert_status AND user_id = user_id); + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +SELECT update_notifications(); + +ALTER TABLE sent_notifications DROP COLUMN cycle_id; +ALTER TABLE sent_notifications DROP COLUMN notification_rule_id; +ALTER TABLE sent_notifications ADD UNIQUE(alert_id, contact_method_id); +ALTER TABLE user_notification_rules DROP COLUMN created_at; +ALTER TABLE user_notification_rules ALTER id DROP DEFAULT; + +ALTER TABLE escalation_policy ALTER description DROP DEFAULT; + -- ALTER COLUMN repeat DROP DEFAULT; + +ALTER TABLE service ALTER description DROP DEFAULT; + +DROP FUNCTION update_notification_cycles(); +ALTER TABLE sent_notifications ADD CONSTRAINT sent_notifications_pkey PRIMARY KEY (id); + + +ALTER TABLE ONLY twilio_voice_callbacks + ADD CONSTRAINT twilio_voice_callbacks_callback_id_fkey FOREIGN KEY (callback_id) REFERENCES sent_notifications(id) ON DELETE CASCADE; +ALTER TABLE ONLY twilio_sms_callbacks + ADD CONSTRAINT twilio_sms_callbacks_callback_id_fkey FOREIGN KEY (callback_id) REFERENCES sent_notifications(id) ON DELETE CASCADE; +DROP INDEX sent_notifications_id_idx; + +CREATE VIEW needs_notification_sent AS SELECT trig.alert_id, + acm.contact_method_id, + cm.type, + cm.value, + a.description, + s.name AS service_name + FROM active_contact_methods acm, + on_call_alert_users trig, + user_contact_methods cm, + alerts a, + service s + WHERE ((acm.user_id = trig.user_id) AND (acm.user_id = trig.user_id) AND (cm.id = acm.contact_method_id) AND (cm.disabled = false) AND (a.id = trig.alert_id) AND (trig.status = 'triggered'::enum_alert_status) AND (s.id = a.service_id) AND (NOT (EXISTS ( SELECT sent_notifications.id 
+ FROM sent_notifications + WHERE ((sent_notifications.alert_id = trig.alert_id) AND (sent_notifications.contact_method_id = acm.contact_method_id) AND (sent_notifications.sent_at IS NOT NULL)))))); diff --git a/migrate/migrations/20170713113728-escalation-schema-hardening.sql b/migrate/migrations/20170713113728-escalation-schema-hardening.sql new file mode 100644 index 0000000000..6c3296fe12 --- /dev/null +++ b/migrate/migrations/20170713113728-escalation-schema-hardening.sql @@ -0,0 +1,34 @@ + +-- +migrate Up + +UPDATE escalation_policy SET description = '' WHERE description IS NULL; +UPDATE escalation_policy SET repeat = 0 WHERE repeat IS NULL; + +ALTER TABLE escalation_policy ALTER COLUMN description SET DEFAULT ''; +ALTER TABLE escalation_policy ALTER COLUMN description SET NOT NULL; + +ALTER TABLE escalation_policy ALTER COLUMN repeat SET DEFAULT 0; +ALTER TABLE escalation_policy ALTER COLUMN repeat SET NOT NULL; + + +UPDATE service SET name = '' WHERE name IS NULL; +UPDATE service SET description = '' WHERE description IS NULL; + +ALTER TABLE service ALTER COLUMN name SET NOT NULL; + +ALTER TABLE service ALTER COLUMN description SET DEFAULT ''; +ALTER TABLE service ALTER COLUMN description SET NOT NULL; + +-- +migrate Down + +ALTER TABLE escalation_policy + ALTER repeat DROP NOT NULL, + ALTER repeat DROP DEFAULT, + ALTER description DROP NOT NULL, + ALTER description DROP DEFAULT; + +ALTER TABLE service + ALTER name DROP NOT NULL, + ALTER name DROP DEFAULT, + ALTER description DROP NOT NULL, + ALTER description DROP DEFAULT; diff --git a/migrate/migrations/20170714155817-notification-rule-tweak.sql b/migrate/migrations/20170714155817-notification-rule-tweak.sql new file mode 100644 index 0000000000..bbbf632e65 --- /dev/null +++ b/migrate/migrations/20170714155817-notification-rule-tweak.sql @@ -0,0 +1,49 @@ + +-- +migrate Up + +CREATE OR REPLACE VIEW user_notification_cycle_state AS + SELECT DISTINCT + c.alert_id, + nr.id AS notification_rule_id, + 
nr.user_id, + c.id AS cycle_id, + (nr.delay_minutes::TEXT||' minutes')::INTERVAL > now()-c.started_at AS future, + nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id) AS pending, + c.escalation_level + FROM + user_notification_cycles c, + alerts a, + user_notification_rules nr + WHERE a.id = c.alert_id + AND a.status = 'triggered' + AND nr.user_id = c.user_id + AND nr.created_at < c.started_at + (nr.delay_minutes::TEXT||' minutes')::INTERVAL + AND nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id); +-- +migrate Down + +CREATE OR REPLACE VIEW user_notification_cycle_state AS + SELECT DISTINCT c.alert_id, + nr.id AS notification_rule_id, + nr.user_id, + c.id AS cycle_id, + ((((nr.delay_minutes)::text || ' minutes'::text))::interval > (now() - (c.started_at)::timestamp with time zone)) AS future, + (NOT (nr.id IN ( SELECT sent_notifications.notification_rule_id + FROM sent_notifications + WHERE ((sent_notifications.alert_id = c.alert_id) AND (sent_notifications.cycle_id = c.id) AND (sent_notifications.contact_method_id = nr.contact_method_id))))) AS pending, + c.escalation_level + FROM user_notification_cycles c, + alerts a, + user_notification_rules nr + WHERE ((a.id = c.alert_id) AND (a.status = 'triggered'::enum_alert_status) AND (nr.user_id = c.user_id) AND (NOT (nr.id IN ( SELECT sent_notifications.notification_rule_id + FROM sent_notifications + WHERE ((sent_notifications.alert_id = c.alert_id) AND (sent_notifications.cycle_id = c.id) AND (sent_notifications.contact_method_id = nr.contact_method_id)))))); diff --git a/migrate/migrations/20170717151241-remove-old-esc-columns.sql b/migrate/migrations/20170717151241-remove-old-esc-columns.sql new file mode 100644 index 0000000000..7519af60fd --- /dev/null +++ 
b/migrate/migrations/20170717151241-remove-old-esc-columns.sql @@ -0,0 +1,6 @@ + +-- +migrate Up +ALTER TABLE escalation_policy DROP COLUMN team_id; + +-- +migrate Down +ALTER TABLE escalation_policy ADD COLUMN team_id TEXT REFERENCES team (id); diff --git a/migrate/migrations/20170717151336-remove-old-service-columns.sql b/migrate/migrations/20170717151336-remove-old-service-columns.sql new file mode 100644 index 0000000000..9f5d529789 --- /dev/null +++ b/migrate/migrations/20170717151336-remove-old-service-columns.sql @@ -0,0 +1,35 @@ + +-- +migrate Up +ALTER TABLE service + DROP COLUMN created_at, + DROP COLUMN summary, + DROP COLUMN type, + DROP COLUMN self, + DROP COLUMN html_url, + DROP COLUMN status, + DROP COLUMN last_incident_timestamp, + DROP COLUMN conference_url, + DROP COLUMN dialin_number, + DROP COLUMN acknowledgement_timeout, + DROP COLUMN auto_resolve_timeout, + DROP COLUMN maintenance_mode, + DROP COLUMN incident_urgency_type, + DROP COLUMN incident_urgency_value; + +-- +migrate Down + +ALTER TABLE service + ADD COLUMN created_at TIMESTAMP WITH TIME ZONE, + ADD COLUMN summary TEXT, + ADD COLUMN type TEXT, + ADD COLUMN self TEXT, + ADD COLUMN html_url TEXT, + ADD COLUMN status TEXT, + ADD COLUMN last_incident_timestamp TIMESTAMP WITH TIME ZONE, + ADD COLUMN conference_url TEXT, + ADD COLUMN dialin_number TEXT, + ADD COLUMN acknowledgement_timeout INT, + ADD COLUMN auto_resolve_timeout INT, + ADD COLUMN maintenance_mode BOOLEAN, + ADD COLUMN incident_urgency_type TEXT, + ADD COLUMN incident_urgency_value TEXT; diff --git a/migrate/migrations/20170717151358-remove-old-tables.sql b/migrate/migrations/20170717151358-remove-old-tables.sql new file mode 100644 index 0000000000..1c5e467983 --- /dev/null +++ b/migrate/migrations/20170717151358-remove-old-tables.sql @@ -0,0 +1,27 @@ + +-- +migrate Up +DROP TABLE team_user, team, integration; + +-- +migrate Down + +CREATE TABLE team ( + id TEXT PRIMARY KEY, + name TEXT, + description TEXT +); + +CREATE TABLE 
team_user ( + id TEXT PRIMARY KEY, + team_id TEXT REFERENCES team (id), + user_id UUID REFERENCES users (id) +); + +CREATE TABLE integration ( + id TEXT PRIMARY KEY, + type TEXT, + name TEXT, + integration_key TEXT UNIQUE, + created_at TIMESTAMP WITH TIME ZONE, + service_id TEXT REFERENCES service (id) ON DELETE CASCADE +); + diff --git a/migrate/migrations/20170717152954-ids-to-uuids.sql b/migrate/migrations/20170717152954-ids-to-uuids.sql new file mode 100644 index 0000000000..89208e141f --- /dev/null +++ b/migrate/migrations/20170717152954-ids-to-uuids.sql @@ -0,0 +1,346 @@ +-- +migrate Up + +-- create new tables -- makes migration/testing easier than trying to rename, alter AND recreate +CREATE TABLE escalation_policies ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + name TEXT NOT NULL UNIQUE, + description TEXT NOT NULL DEFAULT '', + repeat INT NOT NULL DEFAULT 0 +); + +CREATE TABLE escalation_policy_steps ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + delay INT NOT NULL DEFAULT 1, + step_number INT NOT NULL DEFAULT -1, + escalation_policy_id UUID NOT NULL REFERENCES escalation_policies (id) ON DELETE CASCADE, + UNIQUE (escalation_policy_id, step_number) +); + +CREATE TABLE services ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + name TEXT NOT NULL UNIQUE, + description TEXT NOT NULL DEFAULT '', + escalation_policy_id UUID NOT NULL REFERENCES escalation_policies (id) +); + +-- copy data over +INSERT INTO escalation_policies (id, name, description, repeat) +SELECT id::UUID, name, description, repeat FROM escalation_policy; + +INSERT INTO escalation_policy_steps (id, delay, step_number, escalation_policy_id) +SELECT id::UUID, delay, step_number, escalation_policy_id::UUID +FROM escalation_policy_step; + +INSERT INTO services (id, name, description, escalation_policy_id) +SELECT id::UUID, name, description, escalation_policy_id::UUID +FROM service; + +-- drop views +DROP VIEW needs_notification_sent, on_call_alert_users, alert_escalation_levels; + 
+ALTER TABLE alerts + DROP CONSTRAINT alerts_service_id_fkey, + ALTER service_id TYPE UUID USING service_id::UUID, + ADD CONSTRAINT alerts_services_id_fkey FOREIGN KEY (service_id) REFERENCES services (id) ON DELETE CASCADE; + +ALTER TABLE escalation_policy_actions + DROP CONSTRAINT escalation_policy_actions_escalation_policy_step_id_fkey, + ALTER escalation_policy_step_id TYPE UUID USING escalation_policy_step_id::UUID, + ADD CONSTRAINT escalation_policy_actions_escalation_policy_step_id_fkey FOREIGN KEY (escalation_policy_step_id) REFERENCES escalation_policy_steps (id) ON DELETE CASCADE; + +ALTER TABLE integration_keys + DROP CONSTRAINT integration_keys_service_id_fkey, + ALTER service_id TYPE UUID USING service_id::UUID, + ADD CONSTRAINT integration_keys_services_id_fkey FOREIGN KEY (service_id) REFERENCES services (id) ON DELETE CASCADE; + +CREATE VIEW alert_escalation_levels AS + SELECT + alerts.id AS alert_id, + count(step.id) AS levels, + alerts.escalation_level::bigint % count(step.id) AS relative_level + FROM + alerts, + escalation_policy_steps step, + services s + WHERE step.escalation_policy_id = s.escalation_policy_id + AND s.id = alerts.service_id + GROUP BY alerts.id; + +CREATE VIEW on_call_alert_users AS + WITH alert_users AS ( + SELECT act.user_id, + act.schedule_id, + a.id AS alert_id, + a.status, + a.escalation_level + FROM alerts a, + services s, + alert_escalation_levels lvl, + escalation_policy_steps step, + escalation_policy_actions act + WHERE s.id = a.service_id + AND lvl.alert_id = a.id + AND step.escalation_policy_id = s.escalation_policy_id + AND step.step_number = lvl.relative_level + AND a.status <> 'closed'::enum_alert_status + AND act.escalation_policy_step_id = step.id + GROUP BY act.user_id, act.schedule_id, a.id + ) + SELECT DISTINCT au.alert_id, + au.status, + CASE + WHEN au.user_id IS NULL THEN oc.user_id + ELSE au.user_id + END AS user_id, + au.escalation_level + FROM alert_users au + LEFT JOIN on_call oc ON au.schedule_id = 
oc.schedule_id; + +CREATE VIEW needs_notification_sent AS + SELECT DISTINCT + cs.alert_id, + nr.contact_method_id, + cm.type, + cm.value, + a.description, + s.name AS service_name, + nr.id AS notification_rule_id, + cs.escalation_level, + cs.cycle_id + FROM + user_notification_cycle_state cs, + alerts a, + user_contact_methods cm, + user_notification_rules nr, + services s + WHERE a.id = cs.alert_id + AND a.status = 'triggered'::enum_alert_status + AND cs.escalation_level = a.escalation_level + AND cm.id = nr.contact_method_id + AND nr.id = cs.notification_rule_id + AND s.id = a.service_id + AND cs.pending + AND NOT cs.future; + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION set_escalation_policy_step() RETURNS TRIGGER AS + $$ + BEGIN + SELECT count(step_number) INTO NEW.step_number FROM escalation_policy_steps WHERE escalation_policy_id = NEW.escalation_policy_id; + RETURN NEW; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION incr_escalation_policy_steps_on_delete() RETURNS TRIGGER AS + $$ + BEGIN + UPDATE escalation_policy_steps + SET step_number = step_number-1 + WHERE escalation_policy_id = OLD.escalation_policy_id + AND step_number > OLD.step_number; + + RETURN OLD; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION escalate_alerts() RETURNS VOID AS + $$ + BEGIN + UPDATE alerts a + SET escalation_level = escalation_level + 1, last_escalation = now() + FROM services s, escalation_policy_steps step, alert_escalation_levels lvl, escalation_policies e + WHERE (last_escalation + (step.delay::TEXT||' minutes')::interval) < now() + AND a.status = 'triggered'::enum_alert_status + AND s.id = a.service_id + AND step.escalation_policy_id = s.escalation_policy_id + AND lvl.alert_id = a.id + AND step.step_number = ((a.escalation_level + 1) % lvl.levels) + AND e.id = s.escalation_policy_id + AND (e.repeat = -1 OR escalation_level / 
lvl.levels < e.repeat); + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + + + +CREATE TRIGGER incr_escalation_policy_steps_on_delete + AFTER DELETE ON escalation_policy_steps + FOR EACH ROW + EXECUTE PROCEDURE incr_escalation_policy_steps_on_delete(); + + +CREATE TRIGGER set_escalation_policy_step_on_insert + BEFORE INSERT ON escalation_policy_steps + FOR EACH ROW + EXECUTE PROCEDURE set_escalation_policy_step(); + + +DROP TABLE service, escalation_policy_step, escalation_policy; + +-- +migrate Down + +-- start by creating old tables +CREATE TABLE escalation_policy ( + id text DEFAULT (gen_random_uuid())::text NOT NULL PRIMARY KEY, + description text DEFAULT ''::text NOT NULL, + name text UNIQUE, + repeat integer DEFAULT 0 NOT NULL +); +CREATE TABLE service ( + id text PRIMARY KEY NOT NULL, + description text DEFAULT ''::text NOT NULL, + name text NOT NULL UNIQUE, + escalation_policy_id text REFERENCES escalation_policy (id) +); + +CREATE TABLE escalation_policy_step ( + id text DEFAULT (gen_random_uuid())::text NOT NULL PRIMARY KEY, + delay integer DEFAULT 1 NOT NULL, + step_number integer, + escalation_policy_id text REFERENCES escalation_policy (id) ON DELETE CASCADE, + UNIQUE(step_number, escalation_policy_id) +); + +-- drop views +DROP VIEW needs_notification_sent, on_call_alert_users, alert_escalation_levels; + +-- copy data over +INSERT INTO escalation_policy (id, name, description, repeat) +SELECT id::text, name, description, repeat +FROM escalation_policies; + +INSERT INTO escalation_policy_step (id, delay, step_number, escalation_policy_id) +SELECT id::text, delay, step_number, escalation_policy_id::text +FROM escalation_policy_steps; + +INSERT INTO service (id, name, description, escalation_policy_id) +SELECT id::text, name, description, escalation_policy_id::text +FROM services; + +ALTER TABLE alerts + DROP CONSTRAINT alerts_services_id_fkey, + ALTER service_id TYPE TEXT USING service_id::TEXT, + ADD CONSTRAINT alerts_service_id_fkey FOREIGN 
KEY (service_id) REFERENCES service (id) ON DELETE CASCADE; + +ALTER TABLE escalation_policy_actions + DROP CONSTRAINT escalation_policy_actions_escalation_policy_step_id_fkey, + ALTER escalation_policy_step_id TYPE TEXT USING escalation_policy_step_id::TEXT, + ADD CONSTRAINT escalation_policy_actions_escalation_policy_step_id_fkey FOREIGN KEY (escalation_policy_step_id) REFERENCES escalation_policy_step (id) ON DELETE CASCADE; + +ALTER TABLE integration_keys + DROP CONSTRAINT integration_keys_services_id_fkey, + ALTER service_id TYPE TEXT USING service_id::TEXT, + ADD CONSTRAINT integration_keys_service_id_fkey FOREIGN KEY (service_id) REFERENCES service (id) ON DELETE CASCADE; + + +-- restore old views +CREATE VIEW alert_escalation_levels AS SELECT alerts.id AS alert_id, + count(step.id) AS levels, + ((alerts.escalation_level)::bigint % count(step.id)) AS relative_level + FROM alerts, + escalation_policy_step step, + service + WHERE ((step.escalation_policy_id = service.escalation_policy_id) AND (service.id = alerts.service_id)) + GROUP BY alerts.id; + +CREATE VIEW on_call_alert_users AS WITH alert_users AS ( + SELECT act.user_id, + act.schedule_id, + a.id AS alert_id, + a.status, + a.escalation_level + FROM alerts a, + service s, + alert_escalation_levels lvl, + escalation_policy_step step, + escalation_policy_actions act + WHERE ((s.id = a.service_id) AND (lvl.alert_id = a.id) AND (step.escalation_policy_id = s.escalation_policy_id) AND (step.step_number = lvl.relative_level) AND (a.status <> 'closed'::enum_alert_status) AND (act.escalation_policy_step_id = step.id)) + GROUP BY act.user_id, act.schedule_id, a.id + ) + SELECT DISTINCT au.alert_id, + au.status, + CASE + WHEN (au.user_id IS NULL) THEN oc.user_id + ELSE au.user_id + END AS user_id, + au.escalation_level + FROM (alert_users au + LEFT JOIN on_call oc ON ((au.schedule_id = oc.schedule_id))); + +CREATE VIEW needs_notification_sent AS + SELECT DISTINCT cs.alert_id, + nr.contact_method_id, + cm.type, + 
cm.value, + a.description, + s.name AS service_name, + nr.id AS notification_rule_id, + cs.escalation_level, + cs.cycle_id + FROM user_notification_cycle_state cs, + alerts a, + user_contact_methods cm, + user_notification_rules nr, + service s + WHERE ((a.id = cs.alert_id) AND (a.status = 'triggered'::enum_alert_status) AND (cs.escalation_level = a.escalation_level) AND (cm.id = nr.contact_method_id) AND (nr.id = cs.notification_rule_id) AND (s.id = a.service_id) AND cs.pending AND (NOT cs.future)); + +-- restore old function code + +-- +migrate StatementBegin + +CREATE OR REPLACE FUNCTION set_escalation_policy_step() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + SELECT count(step_number) INTO NEW.step_number FROM escalation_policy_step WHERE escalation_policy_id = NEW.escalation_policy_id; + RETURN NEW; + END; + $$; +-- +migrate StatementEnd +-- +migrate StatementBegin + +CREATE OR REPLACE FUNCTION incr_escalation_policy_steps_on_delete() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + UPDATE escalation_policy_step + SET step_number = step_number-1 + WHERE escalation_policy_id = OLD.escalation_policy_id + AND step_number > OLD.step_number; + + RETURN OLD; + END; + $$; +-- +migrate StatementEnd +-- +migrate StatementBegin + +CREATE OR REPLACE FUNCTION escalate_alerts() RETURNS void + LANGUAGE plpgsql + AS $$ + BEGIN + UPDATE alerts a + SET escalation_level = escalation_level + 1, last_escalation = now() + FROM service s, escalation_policy_step step, alert_escalation_levels lvl, escalation_policy e + WHERE (last_escalation + (step.delay::TEXT||' minutes')::interval) < now() + AND a.status = 'triggered'::enum_alert_status + AND s.id = a.service_id + AND step.escalation_policy_id = s.escalation_policy_id + AND lvl.alert_id = a.id + AND step.step_number = (a.escalation_level % lvl.levels) + AND e.id = s.escalation_policy_id + AND (e.repeat = -1 OR (escalation_level+1) / lvl.levels <= e.repeat); + END; + $$; +-- +migrate StatementEnd + +CREATE TRIGGER 
incr_escalation_policy_steps_on_delete AFTER DELETE ON escalation_policy_step FOR EACH ROW EXECUTE PROCEDURE incr_escalation_policy_steps_on_delete(); +CREATE TRIGGER set_escalation_policy_step_on_insert BEFORE INSERT ON escalation_policy_step FOR EACH ROW EXECUTE PROCEDURE set_escalation_policy_step(); + +DROP TABLE escalation_policy_steps, services, escalation_policies; + + diff --git a/migrate/migrations/20170724162219-fix-alert-escalations.sql b/migrate/migrations/20170724162219-fix-alert-escalations.sql new file mode 100644 index 0000000000..943aeb5271 --- /dev/null +++ b/migrate/migrations/20170724162219-fix-alert-escalations.sql @@ -0,0 +1,43 @@ + +-- +migrate Up + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION escalate_alerts() RETURNS VOID AS + $$ + BEGIN + UPDATE alerts a + SET escalation_level = escalation_level + 1, last_escalation = now() + FROM services s, escalation_policy_steps step, alert_escalation_levels lvl, escalation_policies e + WHERE (last_escalation + (step.delay::TEXT||' minutes')::interval) < now() + AND a.status = 'triggered'::enum_alert_status + AND s.id = a.service_id + AND step.escalation_policy_id = s.escalation_policy_id + AND lvl.alert_id = a.id + AND step.step_number = lvl.relative_level + AND e.id = s.escalation_policy_id + AND (e.repeat = -1 OR (escalation_level+1) / lvl.levels <= e.repeat); + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +-- +migrate Down + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION escalate_alerts() RETURNS VOID AS + $$ + BEGIN + UPDATE alerts a + SET escalation_level = escalation_level + 1, last_escalation = now() + FROM services s, escalation_policy_steps step, alert_escalation_levels lvl, escalation_policies e + WHERE (last_escalation + (step.delay::TEXT||' minutes')::interval) < now() + AND a.status = 'triggered'::enum_alert_status + AND s.id = a.service_id + AND step.escalation_policy_id = s.escalation_policy_id + AND lvl.alert_id = a.id + AND step.step_number = 
((a.escalation_level + 1) % lvl.levels) + AND e.id = s.escalation_policy_id + AND (e.repeat = -1 OR escalation_level / lvl.levels < e.repeat); + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd diff --git a/migrate/migrations/20170725105059-rotations-shift-length-check.sql b/migrate/migrations/20170725105059-rotations-shift-length-check.sql new file mode 100644 index 0000000000..0662a9782f --- /dev/null +++ b/migrate/migrations/20170725105059-rotations-shift-length-check.sql @@ -0,0 +1,9 @@ + +-- +migrate Up + +ALTER TABLE rotations ADD CHECK (shift_length > 0); + +-- +migrate Down + +ALTER TABLE rotations DROP CONSTRAINT rotations_shift_length_check; + diff --git a/migrate/migrations/20170725105905-fix-shift-calculation.sql b/migrate/migrations/20170725105905-fix-shift-calculation.sql new file mode 100644 index 0000000000..15d85b98b4 --- /dev/null +++ b/migrate/migrations/20170725105905-fix-shift-calculation.sql @@ -0,0 +1,150 @@ + +-- +migrate Up + +-- need to coerce to bigint (instead of default of double precision) to get the correct shift_number. 
+-- need to compare time intervals in the correct time zone + +CREATE OR REPLACE VIEW on_call AS + WITH rotation_details AS ( + SELECT + rotations.id, + rotations.schedule_id, + rotations.start_time, + (((rotations.shift_length)::text || + CASE + WHEN (rotations.type = 'hourly') THEN ' hours' + WHEN (rotations.type = 'daily') THEN ' days' + ELSE ' weeks' + END))::interval AS shift, + (( + CASE + WHEN (rotations.type = 'hourly') THEN (date_part('epoch', ((now() at time zone s.time_zone) - (rotations.start_time at time zone s.time_zone)))::bigint / 3600) -- number of hours + WHEN (rotations.type = 'daily') THEN date_part('days', ((now() at time zone s.time_zone) - (rotations.start_time at time zone s.time_zone)))::bigint -- number of days + ELSE (date_part('days'::text, ((now() at time zone s.time_zone) - (rotations.start_time at time zone s.time_zone)))::bigint / 7) -- number of weeks + END / rotations.shift_length)) AS shift_number + FROM + rotations, + schedules s + WHERE s.id = rotations.schedule_id + ), + p_count AS ( + SELECT + rp.rotation_id, + count(rp.id) AS count + FROM + rotation_participants rp, + rotation_details d_1 + WHERE (rp.rotation_id = d_1.id) + GROUP BY rp.rotation_id + ), + current_participant AS ( + SELECT + rp.user_id, + p.rotation_id + FROM + rotation_participants rp, + rotation_details d_1, + p_count p + WHERE ((rp.rotation_id = d_1.id) + AND (p.rotation_id = rp.rotation_id) + AND (rp."position" = (d_1.shift_number % p.count))) + ), + next_participant AS ( + SELECT + rp.user_id, + p.rotation_id + FROM + rotation_participants rp, + rotation_details d_1, + p_count p + WHERE ((rp.rotation_id = d_1.id) + AND (p.rotation_id = rp.rotation_id) + AND (rp."position" = ((d_1.shift_number + 1) % p.count))) + ) + SELECT + d.schedule_id, + d.id AS rotation_id, + c.user_id, + n.user_id AS next_user_id, + ((d.shift * (d.shift_number)::bigint) + d.start_time) AS start_time, + ((d.shift * ((d.shift_number + 1))::bigint) + d.start_time) AS end_time, + 
d.shift_number + FROM + rotation_details d, + current_participant c, + next_participant n + WHERE ((d.id = c.rotation_id) + AND (c.rotation_id = n.rotation_id)); + + +-- +migrate Down + + +CREATE OR REPLACE VIEW on_call AS + WITH rotation_details AS ( + SELECT + rotations.id, + rotations.schedule_id, + rotations.start_time, + (((rotations.shift_length)::text || + CASE + WHEN (rotations.type = 'hourly'::enum_rotation_type) THEN ' hours'::text + WHEN (rotations.type = 'daily'::enum_rotation_type) THEN ' days'::text + ELSE ' weeks'::text + END))::interval AS shift, + (( + CASE + WHEN (rotations.type = 'hourly'::enum_rotation_type) THEN (date_part('epoch'::text, (now() - rotations.start_time)) / (3600)::double precision) + WHEN (rotations.type = 'daily'::enum_rotation_type) THEN date_part('days'::text, (now() - rotations.start_time)) + ELSE (date_part('days'::text, (now() - rotations.start_time)) / (7)::double precision) + END / (rotations.shift_length)::double precision))::bigint AS shift_number + FROM rotations + ), + p_count AS ( + SELECT + rp.rotation_id, + count(rp.id) AS count + FROM + rotation_participants rp, + rotation_details d_1 + WHERE (rp.rotation_id = d_1.id) + GROUP BY rp.rotation_id + ), + current_participant AS ( + SELECT + rp.user_id, + p.rotation_id + FROM + rotation_participants rp, + rotation_details d_1, + p_count p + WHERE ((rp.rotation_id = d_1.id) + AND (p.rotation_id = rp.rotation_id) + AND (rp."position" = (d_1.shift_number % p.count))) + ), + next_participant AS ( + SELECT + rp.user_id, + p.rotation_id + FROM + rotation_participants rp, + rotation_details d_1, + p_count p + WHERE ((rp.rotation_id = d_1.id) + AND (p.rotation_id = rp.rotation_id) + AND (rp."position" = ((d_1.shift_number + 1) % p.count))) + ) + SELECT + d.schedule_id, + d.id AS rotation_id, + c.user_id, + n.user_id AS next_user_id, + ((d.shift * (d.shift_number)::double precision) + d.start_time) AS start_time, + ((d.shift * ((d.shift_number + 1))::double precision) + 
d.start_time) AS end_time, + d.shift_number + FROM + rotation_details d, + current_participant c, + next_participant n + WHERE ((d.id = c.rotation_id) + AND (c.rotation_id = n.rotation_id)); diff --git a/migrate/migrations/20170726141849-handle-missing-users.sql b/migrate/migrations/20170726141849-handle-missing-users.sql new file mode 100644 index 0000000000..cebfdde4c8 --- /dev/null +++ b/migrate/migrations/20170726141849-handle-missing-users.sql @@ -0,0 +1,70 @@ + +-- +migrate Up + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION update_notification_cycles() RETURNS VOID AS + $$ + BEGIN + INSERT INTO user_notification_cycles (user_id, alert_id, escalation_level) + SELECT user_id, alert_id, escalation_level + FROM on_call_alert_users + WHERE status = 'triggered' + AND user_id IS NOT NULL + ON CONFLICT DO NOTHING; + + UPDATE user_notification_cycles c + SET escalation_level = a.escalation_level + FROM + alerts a, + user_notification_cycle_state s + WHERE a.id = c.alert_id + AND s.user_id = c.user_id + AND s.alert_id = c.alert_id; + + DELETE FROM user_notification_cycles c + WHERE ( + SELECT count(notification_rule_id) + FROM user_notification_cycle_state s + WHERE s.alert_id = c.alert_id AND s.user_id = c.user_id + LIMIT 1 + ) = 0 + AND c.escalation_level != (SELECT escalation_level FROM alerts WHERE id = c.alert_id); + + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate Down + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION update_notification_cycles() RETURNS VOID AS + $$ + BEGIN + INSERT INTO user_notification_cycles (user_id, alert_id, escalation_level) + SELECT user_id, alert_id, escalation_level + FROM on_call_alert_users + WHERE status = 'triggered' + ON CONFLICT DO NOTHING; + + UPDATE user_notification_cycles c + SET escalation_level = a.escalation_level + FROM + alerts a, + user_notification_cycle_state s + WHERE a.id = c.alert_id + AND s.user_id = c.user_id + AND s.alert_id = c.alert_id; + + DELETE FROM 
user_notification_cycles c + WHERE ( + SELECT count(notification_rule_id) + FROM user_notification_cycle_state s + WHERE s.alert_id = c.alert_id AND s.user_id = c.user_id + LIMIT 1 + ) = 0 + AND c.escalation_level != (SELECT escalation_level FROM alerts WHERE id = c.alert_id); + + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd diff --git a/migrate/migrations/20170726143800-no-oncall-for-future-rotations.sql b/migrate/migrations/20170726143800-no-oncall-for-future-rotations.sql new file mode 100644 index 0000000000..00e8cdeb09 --- /dev/null +++ b/migrate/migrations/20170726143800-no-oncall-for-future-rotations.sql @@ -0,0 +1,152 @@ + +-- +migrate Up + + +CREATE OR REPLACE VIEW on_call AS + WITH rotation_details AS ( + SELECT + rotations.id, + rotations.schedule_id, + rotations.start_time, + (((rotations.shift_length)::text || + CASE + WHEN (rotations.type = 'hourly') THEN ' hours' + WHEN (rotations.type = 'daily') THEN ' days' + ELSE ' weeks' + END))::interval AS shift, + (( + CASE + WHEN (rotations.type = 'hourly') THEN (date_part('epoch', ((now() at time zone s.time_zone) - (rotations.start_time at time zone s.time_zone)))::bigint / 3600) -- number of hours + WHEN (rotations.type = 'daily') THEN date_part('days', ((now() at time zone s.time_zone) - (rotations.start_time at time zone s.time_zone)))::bigint -- number of days + ELSE (date_part('days'::text, ((now() at time zone s.time_zone) - (rotations.start_time at time zone s.time_zone)))::bigint / 7) -- number of weeks + END / rotations.shift_length)) AS shift_number + FROM + rotations, + schedules s + WHERE s.id = rotations.schedule_id + AND rotations.start_time <= now() + ), + p_count AS ( + SELECT + rp.rotation_id, + count(rp.id) AS count + FROM + rotation_participants rp, + rotation_details d_1 + WHERE (rp.rotation_id = d_1.id) + GROUP BY rp.rotation_id + ), + current_participant AS ( + SELECT + rp.user_id, + p.rotation_id + FROM + rotation_participants rp, + rotation_details d_1, + p_count p + WHERE 
((rp.rotation_id = d_1.id) + AND (p.rotation_id = rp.rotation_id) + AND (rp."position" = (d_1.shift_number % p.count))) + ), + next_participant AS ( + SELECT + rp.user_id, + p.rotation_id + FROM + rotation_participants rp, + rotation_details d_1, + p_count p + WHERE ((rp.rotation_id = d_1.id) + AND (p.rotation_id = rp.rotation_id) + AND (rp."position" = ((d_1.shift_number + 1) % p.count))) + ) + SELECT + d.schedule_id, + d.id AS rotation_id, + c.user_id, + n.user_id AS next_user_id, + ((d.shift * (d.shift_number)::bigint) + d.start_time) AS start_time, + ((d.shift * ((d.shift_number + 1))::bigint) + d.start_time) AS end_time, + d.shift_number + FROM + rotation_details d, + current_participant c, + next_participant n + WHERE ((d.id = c.rotation_id) + AND (c.rotation_id = n.rotation_id)); + + +-- +migrate Down + + +CREATE OR REPLACE VIEW on_call AS + WITH rotation_details AS ( + SELECT + rotations.id, + rotations.schedule_id, + rotations.start_time, + (((rotations.shift_length)::text || + CASE + WHEN (rotations.type = 'hourly') THEN ' hours' + WHEN (rotations.type = 'daily') THEN ' days' + ELSE ' weeks' + END))::interval AS shift, + (( + CASE + WHEN (rotations.type = 'hourly') THEN (date_part('epoch', ((now() at time zone s.time_zone) - (rotations.start_time at time zone s.time_zone)))::bigint / 3600) -- number of hours + WHEN (rotations.type = 'daily') THEN date_part('days', ((now() at time zone s.time_zone) - (rotations.start_time at time zone s.time_zone)))::bigint -- number of days + ELSE (date_part('days'::text, ((now() at time zone s.time_zone) - (rotations.start_time at time zone s.time_zone)))::bigint / 7) -- number of weeks + END / rotations.shift_length)) AS shift_number + FROM + rotations, + schedules s + WHERE s.id = rotations.schedule_id + ), + p_count AS ( + SELECT + rp.rotation_id, + count(rp.id) AS count + FROM + rotation_participants rp, + rotation_details d_1 + WHERE (rp.rotation_id = d_1.id) + GROUP BY rp.rotation_id + ), + current_participant AS ( 
+ SELECT + rp.user_id, + p.rotation_id + FROM + rotation_participants rp, + rotation_details d_1, + p_count p + WHERE ((rp.rotation_id = d_1.id) + AND (p.rotation_id = rp.rotation_id) + AND (rp."position" = (d_1.shift_number % p.count))) + ), + next_participant AS ( + SELECT + rp.user_id, + p.rotation_id + FROM + rotation_participants rp, + rotation_details d_1, + p_count p + WHERE ((rp.rotation_id = d_1.id) + AND (p.rotation_id = rp.rotation_id) + AND (rp."position" = ((d_1.shift_number + 1) % p.count))) + ) + SELECT + d.schedule_id, + d.id AS rotation_id, + c.user_id, + n.user_id AS next_user_id, + ((d.shift * (d.shift_number)::bigint) + d.start_time) AS start_time, + ((d.shift * ((d.shift_number + 1))::bigint) + d.start_time) AS end_time, + d.shift_number + FROM + rotation_details d, + current_participant c, + next_participant n + WHERE ((d.id = c.rotation_id) + AND (c.rotation_id = n.rotation_id)); diff --git a/migrate/migrations/20170726155056-twilio-sms-errors.sql b/migrate/migrations/20170726155056-twilio-sms-errors.sql new file mode 100644 index 0000000000..6f9833c771 --- /dev/null +++ b/migrate/migrations/20170726155056-twilio-sms-errors.sql @@ -0,0 +1,14 @@ + +-- +migrate Up + +CREATE TABLE twilio_sms_errors ( + phone_number TEXT NOT NULL, + error_message TEXT NOT NULL, + outgoing BOOLEAN NOT NULL, + occurred_at TIMESTAMP NOT NULL DEFAULT now() +); +CREATE INDEX ON twilio_sms_errors (phone_number, outgoing, occurred_at); + +-- +migrate Down + +DROP TABLE twilio_sms_errors; diff --git a/migrate/migrations/20170726155351-twilio-voice-errors.sql b/migrate/migrations/20170726155351-twilio-voice-errors.sql new file mode 100644 index 0000000000..bb8fad687e --- /dev/null +++ b/migrate/migrations/20170726155351-twilio-voice-errors.sql @@ -0,0 +1,14 @@ + +-- +migrate Up + +CREATE TABLE twilio_voice_errors ( + phone_number TEXT NOT NULL, + error_message TEXT NOT NULL, + outgoing BOOLEAN NOT NULL, + occurred_at TIMESTAMP NOT NULL DEFAULT now() +); +CREATE INDEX ON 
twilio_voice_errors (phone_number, outgoing, occurred_at); + +-- +migrate Down + +DROP TABLE twilio_voice_errors; diff --git a/migrate/migrations/20170802114735-alert_logs_enum_update.sql b/migrate/migrations/20170802114735-alert_logs_enum_update.sql new file mode 100644 index 0000000000..e929ee77ae --- /dev/null +++ b/migrate/migrations/20170802114735-alert_logs_enum_update.sql @@ -0,0 +1,31 @@ + +-- +migrate Up +-- Add new alert_log_event type +ALTER TYPE enum_alert_log_event RENAME TO enum_alert_log_event_old; +CREATE TYPE enum_alert_log_event AS ENUM ( + 'created', + 'reopened', + 'status_changed', + 'assignment_changed', + 'escalated', + 'closed', + 'notification_sent', + 'response_received' +); +ALTER TABLE alert_logs ALTER COLUMN event TYPE enum_alert_log_event USING event::TEXT::enum_alert_log_event; +DROP TYPE enum_alert_log_event_old; + +-- +migrate Down + +ALTER TYPE enum_alert_log_event RENAME TO enum_alert_log_event_old; +CREATE TYPE enum_alert_log_event AS ENUM ( + 'created', + 'reopened', + 'status_changed', + 'assignment_changed', + 'escalated', + 'closed' +); + +ALTER TABLE alert_logs ALTER COLUMN event TYPE enum_alert_log_event USING event::TEXT::enum_alert_log_event; +DROP TYPE enum_alert_log_event_old; diff --git a/migrate/migrations/20170802160314-add-timezones.sql b/migrate/migrations/20170802160314-add-timezones.sql new file mode 100644 index 0000000000..8c944cbe99 --- /dev/null +++ b/migrate/migrations/20170802160314-add-timezones.sql @@ -0,0 +1,143 @@ + +-- +migrate Up +ALTER TABLE alert_logs ALTER "timestamp" TYPE TIMESTAMP WITH TIME ZONE; +ALTER TABLE alerts ALTER last_escalation TYPE TIMESTAMP WITH TIME ZONE; +ALTER TABLE sent_notifications ALTER sent_at TYPE TIMESTAMP WITH TIME ZONE; +ALTER TABLE throttle ALTER last_action_time TYPE TIMESTAMP WITH TIME ZONE; +ALTER TABLE twilio_sms_errors ALTER occurred_at TYPE TIMESTAMP WITH TIME ZONE; +ALTER TABLE twilio_voice_errors ALTER occurred_at TYPE TIMESTAMP WITH TIME ZONE; +ALTER TABLE 
user_contact_method_locks ALTER "timestamp" TYPE TIMESTAMP WITH TIME ZONE; + + +DROP VIEW needs_notification_sent; +DROP VIEW user_notification_cycle_state; + +ALTER TABLE user_notification_cycles ALTER started_at TYPE TIMESTAMP WITH TIME ZONE; +ALTER TABLE user_notification_rules ALTER created_at TYPE TIMESTAMP WITH TIME ZONE; + +CREATE OR REPLACE VIEW user_notification_cycle_state AS + SELECT DISTINCT + c.alert_id, + nr.id AS notification_rule_id, + nr.user_id, + c.id AS cycle_id, + (nr.delay_minutes::TEXT||' minutes')::INTERVAL > now()-c.started_at AS future, + nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id) AS pending, + c.escalation_level + FROM + user_notification_cycles c, + alerts a, + user_notification_rules nr + WHERE a.id = c.alert_id + AND a.status = 'triggered' + AND nr.user_id = c.user_id + AND nr.created_at < c.started_at + (nr.delay_minutes::TEXT||' minutes')::INTERVAL + AND nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id); + +CREATE VIEW needs_notification_sent AS + SELECT DISTINCT + cs.alert_id, + nr.contact_method_id, + cm.type, + cm.value, + a.description, + s.name AS service_name, + nr.id AS notification_rule_id, + cs.escalation_level, + cs.cycle_id + FROM + user_notification_cycle_state cs, + alerts a, + user_contact_methods cm, + user_notification_rules nr, + services s + WHERE a.id = cs.alert_id + AND a.status = 'triggered'::enum_alert_status + AND cs.escalation_level = a.escalation_level + AND cm.id = nr.contact_method_id + AND nr.id = cs.notification_rule_id + AND s.id = a.service_id + AND cs.pending + AND NOT cs.future; + + +-- +migrate Down +ALTER TABLE alert_logs ALTER "timestamp" TYPE TIMESTAMP WITHOUT TIME ZONE; +ALTER TABLE alerts ALTER last_escalation TYPE TIMESTAMP WITHOUT TIME ZONE; +ALTER 
TABLE sent_notifications ALTER sent_at TYPE TIMESTAMP WITHOUT TIME ZONE; +ALTER TABLE throttle ALTER last_action_time TYPE TIMESTAMP WITHOUT TIME ZONE; +ALTER TABLE twilio_sms_errors ALTER occurred_at TYPE TIMESTAMP WITHOUT TIME ZONE; +ALTER TABLE twilio_voice_errors ALTER occurred_at TYPE TIMESTAMP WITHOUT TIME ZONE; +ALTER TABLE user_contact_method_locks ALTER "timestamp" TYPE TIMESTAMP WITHOUT TIME ZONE; + + +DROP VIEW needs_notification_sent; +DROP VIEW user_notification_cycle_state; + +ALTER TABLE user_notification_cycles ALTER started_at TYPE TIMESTAMP WITHOUT TIME ZONE; +ALTER TABLE user_notification_rules ALTER created_at TYPE TIMESTAMP WITHOUT TIME ZONE; + +CREATE OR REPLACE VIEW user_notification_cycle_state AS + SELECT DISTINCT + c.alert_id, + nr.id AS notification_rule_id, + nr.user_id, + c.id AS cycle_id, + (nr.delay_minutes::TEXT||' minutes')::INTERVAL > now()-c.started_at AS future, + nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id) AS pending, + c.escalation_level + FROM + user_notification_cycles c, + alerts a, + user_notification_rules nr + WHERE a.id = c.alert_id + AND a.status = 'triggered' + AND nr.user_id = c.user_id + AND nr.created_at < c.started_at + (nr.delay_minutes::TEXT||' minutes')::INTERVAL + AND nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id); + +CREATE VIEW needs_notification_sent AS + SELECT DISTINCT + cs.alert_id, + nr.contact_method_id, + cm.type, + cm.value, + a.description, + s.name AS service_name, + nr.id AS notification_rule_id, + cs.escalation_level, + cs.cycle_id + FROM + user_notification_cycle_state cs, + alerts a, + user_contact_methods cm, + user_notification_rules nr, + services s + WHERE a.id = cs.alert_id + AND a.status = 'triggered'::enum_alert_status + AND 
cs.escalation_level = a.escalation_level + AND cm.id = nr.contact_method_id + AND nr.id = cs.notification_rule_id + AND s.id = a.service_id + AND cs.pending + AND NOT cs.future; \ No newline at end of file diff --git a/migrate/migrations/20170808110638-user-email-nullable-allowed.sql b/migrate/migrations/20170808110638-user-email-nullable-allowed.sql new file mode 100644 index 0000000000..bceed3cce7 --- /dev/null +++ b/migrate/migrations/20170808110638-user-email-nullable-allowed.sql @@ -0,0 +1,10 @@ + +-- +migrate Up +ALTER TABLE users + DROP CONSTRAINT goalert_user_email_key, + ALTER COLUMN EMAIL SET DEFAULT ''; + +-- +migrate Down +ALTER TABLE users + ADD CONSTRAINT goalert_user_email_key UNIQUE (email), + ALTER COLUMN EMAIL DROP DEFAULT; \ No newline at end of file diff --git a/migrate/migrations/20170811110036-add-generic-integration-key.sql b/migrate/migrations/20170811110036-add-generic-integration-key.sql new file mode 100644 index 0000000000..676d243aa9 --- /dev/null +++ b/migrate/migrations/20170811110036-add-generic-integration-key.sql @@ -0,0 +1,47 @@ + +-- +migrate Up +-- Add new integration key type 'generic' +ALTER TYPE enum_integration_keys_type RENAME TO enum_integration_keys_type_old; +CREATE TYPE enum_integration_keys_type AS ENUM ( + 'grafana', + 'generic' +); +ALTER TABLE integration_keys ALTER COLUMN type TYPE enum_integration_keys_type USING type::TEXT::enum_integration_keys_type; +DROP TYPE enum_integration_keys_type_old; + +-- Add new alert source type 'generic' +ALTER TYPE enum_alert_source RENAME TO enum_alert_source_old; +CREATE TYPE enum_alert_source AS ENUM ( + 'grafana', + 'manual', + 'generic' +); +ALTER TABLE alerts + ALTER COLUMN source DROP DEFAULT, + ALTER COLUMN source TYPE enum_alert_source USING source::TEXT::enum_alert_source, + ALTER COLUMN source SET DEFAULT 'manual'; +DROP TYPE enum_alert_source_old; + + +-- +migrate Down + +-- Go back to just grafana keys (generic keys will manually have to be dropped first if they exist) 
+ALTER TYPE enum_integration_keys_type RENAME TO enum_integration_keys_type_old; +CREATE TYPE enum_integration_keys_type AS ENUM ( + 'grafana' +); +ALTER TABLE integration_keys ALTER COLUMN type TYPE enum_integration_keys_type USING type::TEXT::enum_integration_keys_type; +DROP TYPE enum_integration_keys_type_old; + +-- Go back to just grafana/manual alert sources (generic-source alerts will manually have to be dropped first if they exist) +ALTER TYPE enum_alert_source RENAME TO enum_alert_source_old; +CREATE TYPE enum_alert_source AS ENUM ( + 'grafana', + 'manual' +); +ALTER TABLE alerts + ALTER COLUMN source DROP DEFAULT, + ALTER COLUMN source TYPE enum_alert_source USING source::TEXT::enum_alert_source, + ALTER COLUMN source SET DEFAULT 'manual'; +DROP TYPE enum_alert_source_old; + diff --git a/migrate/migrations/20170817102712-atomic-escalation-policies.sql b/migrate/migrations/20170817102712-atomic-escalation-policies.sql new file mode 100644 index 0000000000..d0dd14b84d --- /dev/null +++ b/migrate/migrations/20170817102712-atomic-escalation-policies.sql @@ -0,0 +1,206 @@ + +-- +migrate Up + +-- disable re-opening alerts +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_prevent_reopen() + RETURNS trigger AS +$BODY$ + BEGIN + IF OLD.status = 'closed' THEN + RAISE EXCEPTION 'cannot change status of closed alert'; + END IF; + RETURN NEW; + END; +$BODY$ + LANGUAGE plpgsql VOLATILE; +-- +migrate StatementEnd + +CREATE TRIGGER trg_prevent_reopen + BEFORE UPDATE OF status + ON alerts + FOR EACH ROW + EXECUTE PROCEDURE fn_prevent_reopen(); + + + +-- collect EP snapshots when alerts are generated +CREATE TABLE alert_escalation_policy_snapshots ( + alert_id BIGINT NOT NULL REFERENCES alerts (id) ON DELETE CASCADE, + step_number INT NOT NULL, + step_max INT NOT NULL, + step_delay INTERVAL NOT NULL, + repeat INT NOT NULL, + user_id UUID REFERENCES users (id) ON DELETE CASCADE, + schedule_id UUID REFERENCES schedules (id) ON DELETE CASCADE +); + +CREATE VIEW alert_escalation_policies AS 
+ WITH step_max AS ( + SELECT escalation_policy_id, count(step_number) as step_max + FROM escalation_policy_steps + GROUP BY escalation_policy_id + ) + SELECT a.id as alert_id, step.step_number, m.step_max, (step.delay::TEXT||' minutes')::INTERVAL as step_delay, e.repeat, act.user_id, act.schedule_id + FROM + alerts a, + escalation_policies e, + escalation_policy_steps step, + step_max m, + escalation_policy_actions act, + services svc + WHERE a.service_id = svc.id + AND e.id = svc.escalation_policy_id + AND step.escalation_policy_id = m.escalation_policy_id + AND step.escalation_policy_id = svc.escalation_policy_id + AND act.escalation_policy_step_id = step.id; + + +INSERT INTO alert_escalation_policy_snapshots + (alert_id, step_number, step_max, step_delay, repeat, user_id, schedule_id) +SELECT alert_id, step_number, step_max, step_delay, repeat, user_id, schedule_id +FROM alert_escalation_policies; + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_snapshot_escalation_policy() + RETURNS trigger AS +$BODY$ + BEGIN + INSERT INTO alert_escalation_policy_snapshots + (alert_id, step_number, step_max, step_delay, repeat, user_id, schedule_id) + SELECT alert_id, step_number, step_max, step_delay, repeat, user_id, schedule_id + FROM alert_escalation_policies pol + WHERE pol.alert_id = NEW.id; + + RETURN NEW; + END; +$BODY$ + LANGUAGE plpgsql VOLATILE; +-- +migrate StatementEnd + + +CREATE TRIGGER trg_snapshot_escalation_policy + AFTER INSERT + ON alerts + FOR EACH ROW + EXECUTE PROCEDURE fn_snapshot_escalation_policy(); + + + +-- Use snapshots when calculating notifications + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION escalate_alerts() RETURNS VOID AS + $$ + BEGIN + UPDATE alerts + SET escalation_level = escalation_level + 1, last_escalation = now() + FROM alert_escalation_policy_snapshots e + WHERE (last_escalation + e.step_delay) < now() + AND status = 'triggered' + AND id = e.alert_id + AND e.step_number = (escalation_level % e.step_max) + AND 
(e.repeat = -1 OR (escalation_level+1) / e.step_max <= e.repeat); + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +CREATE OR REPLACE VIEW on_call_alert_users AS + WITH alert_users AS ( + SELECT s.user_id, + s.schedule_id, + s.alert_id, + a.status, + a.escalation_level + FROM + alerts a, + alert_escalation_policy_snapshots s + WHERE s.alert_id = a.id + AND s.step_number = (a.escalation_level % s.step_max) + AND a.status <> 'closed' + ) + SELECT DISTINCT au.alert_id, + au.status, + CASE + WHEN au.user_id IS NULL THEN oc.user_id + ELSE au.user_id + END AS user_id, + au.escalation_level + FROM alert_users au + LEFT JOIN on_call oc ON au.schedule_id = oc.schedule_id; + +DROP VIEW alert_escalation_levels; + +-- +migrate Down + + +CREATE VIEW alert_escalation_levels AS + SELECT + alerts.id AS alert_id, + count(step.id) AS levels, + alerts.escalation_level::bigint % count(step.id) AS relative_level + FROM + alerts, + escalation_policy_steps step, + services s + WHERE step.escalation_policy_id = s.escalation_policy_id + AND s.id = alerts.service_id + GROUP BY alerts.id; + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION escalate_alerts() RETURNS VOID AS + $$ + BEGIN + UPDATE alerts a + SET escalation_level = escalation_level + 1, last_escalation = now() + FROM services s, escalation_policy_steps step, alert_escalation_levels lvl, escalation_policies e + WHERE (last_escalation + (step.delay::TEXT||' minutes')::interval) < now() + AND a.status = 'triggered'::enum_alert_status + AND s.id = a.service_id + AND step.escalation_policy_id = s.escalation_policy_id + AND lvl.alert_id = a.id + AND step.step_number = lvl.relative_level + AND e.id = s.escalation_policy_id + AND (e.repeat = -1 OR (escalation_level+1) / lvl.levels <= e.repeat); + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +CREATE OR REPLACE VIEW on_call_alert_users AS + WITH alert_users AS ( + SELECT act.user_id, + act.schedule_id, + a.id AS alert_id, + a.status, + a.escalation_level 
+ FROM alerts a, + services s, + alert_escalation_levels lvl, + escalation_policy_steps step, + escalation_policy_actions act + WHERE s.id = a.service_id + AND lvl.alert_id = a.id + AND step.escalation_policy_id = s.escalation_policy_id + AND step.step_number = lvl.relative_level + AND a.status <> 'closed'::enum_alert_status + AND act.escalation_policy_step_id = step.id + GROUP BY act.user_id, act.schedule_id, a.id + ) + SELECT DISTINCT au.alert_id, + au.status, + CASE + WHEN au.user_id IS NULL THEN oc.user_id + ELSE au.user_id + END AS user_id, + au.escalation_level + FROM alert_users au + LEFT JOIN on_call oc ON au.schedule_id = oc.schedule_id; + +DROP TRIGGER trg_snapshot_escalation_policy ON alerts; +DROP TRIGGER trg_prevent_reopen ON alerts; +DROP FUNCTION fn_snapshot_escalation_policy(); +DROP FUNCTION fn_prevent_reopen(); +DROP TABLE alert_escalation_policy_snapshots; +DROP VIEW alert_escalation_policies; diff --git a/migrate/migrations/20170818135106-add-gravatar-col-to-user.sql b/migrate/migrations/20170818135106-add-gravatar-col-to-user.sql new file mode 100644 index 0000000000..bb2f0e4e0c --- /dev/null +++ b/migrate/migrations/20170818135106-add-gravatar-col-to-user.sql @@ -0,0 +1,8 @@ + +-- +migrate Up + +ALTER TABLE users ADD COLUMN avatar_url TEXT NOT NULL DEFAULT ''; + +-- +migrate Down + +ALTER TABLE users DROP COLUMN avatar_url; diff --git a/migrate/migrations/20170825124926-escalation-policy-step-reorder.sql b/migrate/migrations/20170825124926-escalation-policy-step-reorder.sql new file mode 100644 index 0000000000..d83fdd9cb6 --- /dev/null +++ b/migrate/migrations/20170825124926-escalation-policy-step-reorder.sql @@ -0,0 +1,42 @@ + +-- +migrate Up + +ALTER TABLE escalation_policy_steps + DROP CONSTRAINT escalation_policy_steps_escalation_policy_id_step_number_key, + ADD UNIQUE(escalation_policy_id, step_number) DEFERRABLE INITIALLY DEFERRED; -- Needs to be deferrable in order to reorder steps else query will fail. 
+ +-- +migrate StatementBegin + +CREATE OR REPLACE FUNCTION move_escalation_policy_step(_id UUID, _new_pos INT) RETURNS VOID AS + $$ + DECLARE + _old_pos INT; + _epid UUID; + BEGIN + SELECT step_number, escalation_policy_id into _old_pos, _epid FROM escalation_policy_steps WHERE id = _id; + IF _old_pos > _new_pos THEN + UPDATE escalation_policy_steps + SET step_number = step_number + 1 + WHERE escalation_policy_id = _epid + AND step_number < _old_pos + AND step_number >= _new_pos; + ELSE + UPDATE escalation_policy_steps + SET step_number = step_number - 1 + WHERE escalation_policy_id = _epid + AND step_number > _old_pos + AND step_number <= _new_pos; + END IF; + UPDATE escalation_policy_steps + SET step_number = _new_pos + WHERE id = _id; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd +-- +migrate Down + +ALTER TABLE escalation_policy_steps + DROP CONSTRAINT escalation_policy_steps_escalation_policy_id_step_number_key, + ADD CONSTRAINT escalation_policy_steps_escalation_policy_id_step_number_key UNIQUE (escalation_policy_id, step_number); + +DROP FUNCTION move_escalation_policy_step(_id UUID, _new_pos INT); diff --git a/migrate/migrations/20171024114842-adjust-notification-create-at-check.sql b/migrate/migrations/20171024114842-adjust-notification-create-at-check.sql new file mode 100644 index 0000000000..5da5e65af5 --- /dev/null +++ b/migrate/migrations/20171024114842-adjust-notification-create-at-check.sql @@ -0,0 +1,62 @@ + +-- +migrate Up + +CREATE OR REPLACE VIEW user_notification_cycle_state AS + SELECT DISTINCT + c.alert_id, + nr.id AS notification_rule_id, + nr.user_id, + c.id AS cycle_id, + (nr.delay_minutes::TEXT||' minutes')::INTERVAL > now()-c.started_at AS future, + nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id) AS pending, + c.escalation_level + FROM + user_notification_cycles c, + alerts a, + user_notification_rules 
nr + WHERE a.id = c.alert_id + AND a.status = 'triggered' + AND nr.user_id = c.user_id + AND nr.created_at <= c.started_at + (nr.delay_minutes::TEXT||' minutes')::INTERVAL + AND nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id); + +-- +migrate Down + +CREATE OR REPLACE VIEW user_notification_cycle_state AS + SELECT DISTINCT + c.alert_id, + nr.id AS notification_rule_id, + nr.user_id, + c.id AS cycle_id, + (nr.delay_minutes::TEXT||' minutes')::INTERVAL > now()-c.started_at AS future, + nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id) AS pending, + c.escalation_level + FROM + user_notification_cycles c, + alerts a, + user_notification_rules nr + WHERE a.id = c.alert_id + AND a.status = 'triggered' + AND nr.user_id = c.user_id + AND nr.created_at < c.started_at + (nr.delay_minutes::TEXT||' minutes')::INTERVAL + AND nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id); diff --git a/migrate/migrations/20171027145352-dont-notify-disabled-cms.sql b/migrate/migrations/20171027145352-dont-notify-disabled-cms.sql new file mode 100644 index 0000000000..012825382f --- /dev/null +++ b/migrate/migrations/20171027145352-dont-notify-disabled-cms.sql @@ -0,0 +1,57 @@ + +-- +migrate Up + +CREATE OR REPLACE VIEW needs_notification_sent AS + SELECT DISTINCT + cs.alert_id, + nr.contact_method_id, + cm.type, + cm.value, + a.description, + s.name AS service_name, + nr.id AS notification_rule_id, + cs.escalation_level, + cs.cycle_id + FROM + user_notification_cycle_state cs, + alerts a, + user_contact_methods cm, + user_notification_rules nr, + services s + WHERE a.id = cs.alert_id + AND a.status = 'triggered'::enum_alert_status + AND 
cs.escalation_level = a.escalation_level + AND cm.id = nr.contact_method_id + AND nr.id = cs.notification_rule_id + AND s.id = a.service_id + AND cs.pending + AND NOT cs.future + AND cm.disabled = FALSE; + +-- +migrate Down + +CREATE OR REPLACE VIEW needs_notification_sent AS + SELECT DISTINCT + cs.alert_id, + nr.contact_method_id, + cm.type, + cm.value, + a.description, + s.name AS service_name, + nr.id AS notification_rule_id, + cs.escalation_level, + cs.cycle_id + FROM + user_notification_cycle_state cs, + alerts a, + user_contact_methods cm, + user_notification_rules nr, + services s + WHERE a.id = cs.alert_id + AND a.status = 'triggered'::enum_alert_status + AND cs.escalation_level = a.escalation_level + AND cm.id = nr.contact_method_id + AND nr.id = cs.notification_rule_id + AND s.id = a.service_id + AND cs.pending + AND NOT cs.future; \ No newline at end of file diff --git a/migrate/migrations/20171030130758-ev3-drop-views.sql b/migrate/migrations/20171030130758-ev3-drop-views.sql new file mode 100644 index 0000000000..a84c7b0f55 --- /dev/null +++ b/migrate/migrations/20171030130758-ev3-drop-views.sql @@ -0,0 +1,245 @@ + +-- +migrate Up + +drop view on_call_next_rotation; +drop view on_call_alert_users; +drop view on_call; +drop view alert_escalation_policies; +drop view needs_notification_sent; +drop view user_notification_cycle_state; + +-- +migrate Down + + +CREATE OR REPLACE VIEW on_call AS + WITH rotation_details AS ( + SELECT + rotations.id, + rotations.schedule_id, + rotations.start_time, + (((rotations.shift_length)::text || + CASE + WHEN (rotations.type = 'hourly') THEN ' hours' + WHEN (rotations.type = 'daily') THEN ' days' + ELSE ' weeks' + END))::interval AS shift, + (( + CASE + WHEN (rotations.type = 'hourly') THEN (date_part('epoch', ((now() at time zone s.time_zone) - (rotations.start_time at time zone s.time_zone)))::bigint / 3600) -- number of hours + WHEN (rotations.type = 'daily') THEN date_part('days', ((now() at time zone s.time_zone) - 
(rotations.start_time at time zone s.time_zone)))::bigint -- number of days + ELSE (date_part('days'::text, ((now() at time zone s.time_zone) - (rotations.start_time at time zone s.time_zone)))::bigint / 7) -- number of weeks + END / rotations.shift_length)) AS shift_number + FROM + rotations, + schedules s + WHERE s.id = rotations.schedule_id + AND rotations.start_time <= now() + ), + p_count AS ( + SELECT + rp.rotation_id, + count(rp.id) AS count + FROM + rotation_participants rp, + rotation_details d_1 + WHERE (rp.rotation_id = d_1.id) + GROUP BY rp.rotation_id + ), + current_participant AS ( + SELECT + rp.user_id, + p.rotation_id + FROM + rotation_participants rp, + rotation_details d_1, + p_count p + WHERE ((rp.rotation_id = d_1.id) + AND (p.rotation_id = rp.rotation_id) + AND (rp."position" = (d_1.shift_number % p.count))) + ), + next_participant AS ( + SELECT + rp.user_id, + p.rotation_id + FROM + rotation_participants rp, + rotation_details d_1, + p_count p + WHERE ((rp.rotation_id = d_1.id) + AND (p.rotation_id = rp.rotation_id) + AND (rp."position" = ((d_1.shift_number + 1) % p.count))) + ) + SELECT + d.schedule_id, + d.id AS rotation_id, + c.user_id, + n.user_id AS next_user_id, + ((d.shift * (d.shift_number)::bigint) + d.start_time) AS start_time, + ((d.shift * ((d.shift_number + 1))::bigint) + d.start_time) AS end_time, + d.shift_number + FROM + rotation_details d, + current_participant c, + next_participant n + WHERE ((d.id = c.rotation_id) + AND (c.rotation_id = n.rotation_id)); + +CREATE OR REPLACE VIEW on_call_alert_users AS + WITH alert_users AS ( + SELECT s.user_id, + s.schedule_id, + s.alert_id, + a.status, + a.escalation_level + FROM + alerts a, + alert_escalation_policy_snapshots s + WHERE s.alert_id = a.id + AND s.step_number = (a.escalation_level % s.step_max) + AND a.status <> 'closed' + ) + SELECT DISTINCT au.alert_id, + au.status, + CASE + WHEN au.user_id IS NULL THEN oc.user_id + ELSE au.user_id + END AS user_id, + au.escalation_level + 
FROM alert_users au + LEFT JOIN on_call oc ON au.schedule_id = oc.schedule_id; + +CREATE OR REPLACE VIEW on_call_next_rotation AS +WITH + p_count AS ( + SELECT rotation_id, count(rp.position) + FROM + rotations r, + rotation_participants rp + WHERE r.id = rp.rotation_id + GROUP BY rotation_id + ) +SELECT + oc.schedule_id, + rp.rotation_id, + rp.user_id, + oc.next_user_id, + + ( + CASE WHEN oc.shift_number % p.count < rp.position + THEN rp.position-(oc.shift_number % p.count) + ELSE rp.position-(oc.shift_number % p.count)+p.count + END + ) * (oc.end_time-oc.start_time) + oc.start_time start_time, + + ( + CASE WHEN oc.shift_number % p.count < rp.position + THEN rp.position-(oc.shift_number % p.count) + ELSE rp.position-(oc.shift_number % p.count)+p.count + END + ) * (oc.end_time-oc.start_time) + oc.end_time end_time, + + ( + CASE WHEN oc.shift_number % p.count < rp.position + THEN rp.position-(oc.shift_number % p.count) + ELSE rp.position-(oc.shift_number % p.count)+p.count + END + ) + oc.shift_number shift_number + +FROM + rotations r, + rotation_participants rp, + p_count p, + on_call oc +WHERE p.rotation_id = r.id + AND rp.rotation_id = r.id + AND oc.rotation_id = r.id +GROUP BY + rp.user_id, + rp.rotation_id, + oc.shift_number, + p.count, + shift_length, + type, + oc.start_time, + oc.end_time, + rp.position, + oc.schedule_id, + oc.next_user_id; + +CREATE VIEW alert_escalation_policies AS + WITH step_max AS ( + SELECT escalation_policy_steps.escalation_policy_id, + count(escalation_policy_steps.step_number) AS step_max + FROM escalation_policy_steps + GROUP BY escalation_policy_steps.escalation_policy_id + ) + SELECT a.id AS alert_id, + step.step_number, + m.step_max, + (step.delay::text || ' minutes'::text)::interval AS step_delay, + e.repeat, + act.user_id, + act.schedule_id + FROM alerts a, + escalation_policies e, + escalation_policy_steps step, + step_max m, + escalation_policy_actions act, + services svc + WHERE a.service_id = svc.id AND e.id = 
svc.escalation_policy_id AND step.escalation_policy_id = m.escalation_policy_id AND step.escalation_policy_id = svc.escalation_policy_id AND act.escalation_policy_step_id = step.id; + + +CREATE VIEW user_notification_cycle_state AS + SELECT DISTINCT + c.alert_id, + nr.id AS notification_rule_id, + nr.user_id, + c.id AS cycle_id, + (nr.delay_minutes::TEXT||' minutes')::INTERVAL > now()-c.started_at AS future, + nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id) AS pending, + c.escalation_level + FROM + user_notification_cycles c, + alerts a, + user_notification_rules nr + WHERE a.id = c.alert_id + AND a.status = 'triggered' + AND nr.user_id = c.user_id + AND nr.created_at <= c.started_at + (nr.delay_minutes::TEXT||' minutes')::INTERVAL + AND nr.id NOT IN ( + SELECT notification_rule_id + FROM sent_notifications + WHERE alert_id = c.alert_id + AND cycle_id = c.id + AND contact_method_id = nr.contact_method_id); +CREATE VIEW needs_notification_sent AS + SELECT DISTINCT + cs.alert_id, + nr.contact_method_id, + cm.type, + cm.value, + a.description, + s.name AS service_name, + nr.id AS notification_rule_id, + cs.escalation_level, + cs.cycle_id + FROM + user_notification_cycle_state cs, + alerts a, + user_contact_methods cm, + user_notification_rules nr, + services s + WHERE a.id = cs.alert_id + AND a.status = 'triggered'::enum_alert_status + AND cs.escalation_level = a.escalation_level + AND cm.id = nr.contact_method_id + AND nr.id = cs.notification_rule_id + AND s.id = a.service_id + AND cs.pending + AND NOT cs.future + AND cm.disabled = FALSE; diff --git a/migrate/migrations/20171030130759-ev3-schedule-rules.sql b/migrate/migrations/20171030130759-ev3-schedule-rules.sql new file mode 100644 index 0000000000..4d73752a48 --- /dev/null +++ b/migrate/migrations/20171030130759-ev3-schedule-rules.sql @@ -0,0 +1,31 @@ + +-- +migrate Up + +CREATE TABLE 
schedule_rules ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + schedule_id UUID NOT NULL REFERENCES schedules (id) ON DELETE CASCADE, + sunday BOOLEAN NOT NULL DEFAULT true, + monday BOOLEAN NOT NULL DEFAULT true, + tuesday BOOLEAN NOT NULL DEFAULT true, + wednesday BOOLEAN NOT NULL DEFAULT true, + thursday BOOLEAN NOT NULL DEFAULT true, + friday BOOLEAN NOT NULL DEFAULT true, + saturday BOOLEAN NOT NULL DEFAULT true, + start_time TIME NOT NULL DEFAULT '00:00:00', + end_time TIME NOT NULL DEFAULT '23:59:59', + + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + + tgt_user_id UUID REFERENCES users (id) ON DELETE CASCADE, + tgt_rotation_id UUID REFERENCES rotations (id) ON DELETE CASCADE, + + CHECK( + (tgt_user_id IS NULL AND tgt_rotation_id IS NOT NULL) + OR + (tgt_user_id IS NOT NULL AND tgt_rotation_id IS NULL) + ) +); + +-- +migrate Down + +DROP TABLE schedule_rules; diff --git a/migrate/migrations/20171030130800-ev3-notification-policy.sql b/migrate/migrations/20171030130800-ev3-notification-policy.sql new file mode 100644 index 0000000000..5fd7484c8f --- /dev/null +++ b/migrate/migrations/20171030130800-ev3-notification-policy.sql @@ -0,0 +1,50 @@ + +-- +migrate Up + +-- future work, for now we are using the user_id as the policy id + +-- CREATE TABLE notification_policies ( +-- id UUID PRIMARY KEY DEFAULT gen_random_uuid(), +-- repeat_count INT NOT NULL DEFAULT 0, +-- repeat_delay_minutes INT NOT NULL DEFAULT 1 +-- ); + +-- CREATE TABLE notification_policy_rules ( +-- id UUID PRIMARY KEY DEFAULT gen_random_uuid(), +-- notification_policy_id UUID NOT NULL REFERENCES notification_policies (id) ON DELETE CASCADE, +-- delay_minutes INT NOT NULL DEFAULT 0, +-- created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now() +-- ); + +CREATE TABLE notification_policy_cycles ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE, + alert_id INT NOT NULL REFERENCES alerts (id) ON DELETE CASCADE, + 
repeat_count INT NOT NULL DEFAULT 0, + started_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + checked BOOLEAN NOT NULL DEFAULT TRUE, + + UNIQUE(user_id, alert_id) +); + +INSERT INTO notification_policy_cycles ( + id, + user_id, + alert_id, + started_at +) +SELECT + c.id, + c.user_id, + c.alert_id, + c.started_at +FROM + user_notification_cycles c, + alerts a +WHERE + a.id = c.alert_id AND + a.escalation_level = c.escalation_level; + + +-- +migrate Down +DROP TABLE notification_policy_cycles; diff --git a/migrate/migrations/20171030130801-ev3-escalation-policy-state.sql b/migrate/migrations/20171030130801-ev3-escalation-policy-state.sql new file mode 100644 index 0000000000..f19a188959 --- /dev/null +++ b/migrate/migrations/20171030130801-ev3-escalation-policy-state.sql @@ -0,0 +1,62 @@ + +-- +migrate Up +CREATE TABLE escalation_policy_state ( + escalation_policy_id UUID NOT NULL REFERENCES escalation_policies (id) ON DELETE CASCADE, + escalation_policy_step_id UUID REFERENCES escalation_policy_steps (id) ON DELETE SET NULL, + escalation_policy_step_number INT NOT NULL DEFAULT 0, + alert_id BIGINT NOT NULL REFERENCES alerts (id) ON DELETE CASCADE, + last_escalation TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + loop_count INT NOT NULL DEFAULT 0, + force_escalation BOOLEAN NOT NULL DEFAULT false, + + UNIQUE(alert_id, escalation_policy_id) +); + +WITH ep_step_count AS ( + SELECT count(id) as max, escalation_policy_id + FROM escalation_policy_steps + GROUP BY escalation_policy_id +) +INSERT INTO escalation_policy_state ( + escalation_policy_id, + escalation_policy_step_id, + alert_id, + last_escalation, + loop_count +) +SELECT + svc.escalation_policy_id, + step.id, + alert.id, + alert.last_escalation, + alert.escalation_level / cnt.max +FROM + alerts alert, + services svc, + ep_step_count cnt, + escalation_policy_steps step +WHERE svc.id = alert.service_id + AND step.escalation_policy_id = svc.escalation_policy_id + AND cnt.escalation_policy_id = 
svc.escalation_policy_id + AND step.step_number = alert.escalation_level % cnt.max; + +-- +migrate Down + +WITH ep_step_count AS ( + SELECT count(id) as max, escalation_policy_id + FROM escalation_policy_steps + GROUP BY escalation_policy_id +) +UPDATE alerts +SET + escalation_level = cnt.max * state.loop_count + step.step_number, + last_escalation = state.last_escalation +FROM + escalation_policy_state state, + escalation_policy_steps step, + ep_step_count cnt +WHERE step.id = state.escalation_policy_step_id + AND alerts.id = state.alert_id + AND cnt.escalation_policy_id = state.escalation_policy_id; + +DROP TABLE escalation_policy_state; diff --git a/migrate/migrations/20171030130802-ev3-rotations.sql b/migrate/migrations/20171030130802-ev3-rotations.sql new file mode 100644 index 0000000000..f939227209 --- /dev/null +++ b/migrate/migrations/20171030130802-ev3-rotations.sql @@ -0,0 +1,16 @@ + +-- +migrate Up + + +CREATE TABLE rotation_state ( + rotation_id UUID PRIMARY KEY REFERENCES rotations (id) ON DELETE CASCADE, + position INT NOT NULL DEFAULT 0, + + -- it's ok if it's NULL, we just resume based on position + rotation_participant_id UUID REFERENCES rotation_participants (id) ON DELETE SET NULL, + shift_start TIMESTAMP WITH TIME ZONE NOT NULL +); + +-- +migrate Down + +DROP TABLE rotation_state; diff --git a/migrate/migrations/20171030130804-ev3-assign-schedule-rotations.sql b/migrate/migrations/20171030130804-ev3-assign-schedule-rotations.sql new file mode 100644 index 0000000000..2d1f171133 --- /dev/null +++ b/migrate/migrations/20171030130804-ev3-assign-schedule-rotations.sql @@ -0,0 +1,43 @@ + +-- +migrate Up + +ALTER TABLE rotations + ADD COLUMN time_zone TEXT; + +-- inherit timezone +UPDATE rotations +SET + time_zone = s.time_zone, + name = s.name||' Rotation' +FROM schedules s +WHERE s.id = schedule_id; + +ALTER TABLE rotations + ALTER COLUMN time_zone SET NOT NULL, + ADD CONSTRAINT rotations_name_unique UNIQUE (name); + +INSERT INTO schedule_rules 
(schedule_id, tgt_rotation_id) +SELECT schedule_id, id +FROM rotations; + +ALTER TABLE rotations + DROP COLUMN schedule_id, + ALTER COLUMN time_zone SET NOT NULL; + +-- +migrate Down + +ALTER TABLE rotations + DROP COLUMN time_zone, + ADD COLUMN schedule_id UUID REFERENCES schedules (id) ON DELETE CASCADE, + DROP CONSTRAINT rotations_name_unique; + +UPDATE rotations rot +SET schedule_id = rule.schedule_id +FROM schedule_rules rule +WHERE rule.tgt_rotation_id = rot.id; + +ALTER TABLE rotations + ALTER COLUMN schedule_id SET NOT NULL, + ADD CONSTRAINT rotations_schedule_id_name_key UNIQUE (schedule_id, name); + +DELETE FROM schedule_rules; diff --git a/migrate/migrations/20171030130806-ev3-add-rotation-ep-action.sql b/migrate/migrations/20171030130806-ev3-add-rotation-ep-action.sql new file mode 100644 index 0000000000..f5d5fe01b9 --- /dev/null +++ b/migrate/migrations/20171030130806-ev3-add-rotation-ep-action.sql @@ -0,0 +1,35 @@ + +-- +migrate Up + +ALTER TABLE escalation_policy_actions + ADD COLUMN rotation_id UUID REFERENCES rotations (id) ON DELETE CASCADE, + + DROP CONSTRAINT escalation_policy_actions_escalation_policy_step_id_schedul_key, + DROP CONSTRAINT escalation_policy_actions_check, + + ADD CONSTRAINT epa_no_duplicate_users UNIQUE(escalation_policy_step_id, user_id), + ADD CONSTRAINT epa_no_duplicate_schedules UNIQUE(escalation_policy_step_id, schedule_id), + ADD CONSTRAINT epa_no_duplicate_rotations UNIQUE(escalation_policy_step_id, rotation_id), + ADD CONSTRAINT epa_there_can_only_be_one CHECK ( + (user_id IS NULL AND schedule_id IS NULL AND rotation_id IS NOT NULL) + OR + (user_id IS NULL AND schedule_id IS NOT NULL AND rotation_id IS NULL) + OR + (user_id IS NOT NULL AND schedule_id IS NULL AND rotation_id IS NULL) + ); + + +-- +migrate Down + +DELETE FROM escalation_policy_actions WHERE rotation_id IS NOT NULL; + +ALTER TABLE escalation_policy_actions + DROP COLUMN rotation_id, + DROP CONSTRAINT epa_no_duplicate_schedules, + DROP CONSTRAINT 
epa_no_duplicate_users, + ADD CONSTRAINT escalation_policy_actions_escalation_policy_step_id_schedul_key UNIQUE(escalation_policy_step_id, schedule_id, user_id), + ADD CONSTRAINT escalation_policy_actions_check CHECK ( + (schedule_id IS NOT NULL AND user_id IS NULL) + OR + (user_id IS NOT NULL AND schedule_id IS NULL) + ); diff --git a/migrate/migrations/20171030130810-ev3-notification-logs.sql b/migrate/migrations/20171030130810-ev3-notification-logs.sql new file mode 100644 index 0000000000..50756206a5 --- /dev/null +++ b/migrate/migrations/20171030130810-ev3-notification-logs.sql @@ -0,0 +1,35 @@ + +-- +migrate Up + +CREATE TABLE notification_logs ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + alert_id BIGINT NOT NULL REFERENCES alerts (id) ON DELETE CASCADE, + contact_method_id UUID NOT NULL REFERENCES user_contact_methods (id) ON DELETE CASCADE, + process_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + completed BOOLEAN NOT NULL DEFAULT FALSE +); + +INSERT INTO notification_logs ( + id, + alert_id, + contact_method_id, + process_timestamp, + completed +) +SELECT + s.id, + s.alert_id, + s.contact_method_id, + s.sent_at, + TRUE +FROM + sent_notifications s +JOIN notification_policy_cycles c ON s.cycle_id = c.id -- only record sent notifications for active cycles, to preserve old behavior +WHERE + s.sent_at IS NOT NULL +ORDER BY s.sent_at DESC +ON CONFLICT DO NOTHING; + +-- +migrate Down + +DROP TABLE notification_logs; diff --git a/migrate/migrations/20171030130811-ev3-drop-ep-snapshot-trigger.sql b/migrate/migrations/20171030130811-ev3-drop-ep-snapshot-trigger.sql new file mode 100644 index 0000000000..765ae5668b --- /dev/null +++ b/migrate/migrations/20171030130811-ev3-drop-ep-snapshot-trigger.sql @@ -0,0 +1,30 @@ + +-- +migrate Up +DROP TRIGGER trg_snapshot_escalation_policy ON alerts; +DROP FUNCTION fn_snapshot_escalation_policy(); + +-- +migrate Down + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_snapshot_escalation_policy() 
+ RETURNS trigger AS +$BODY$ + BEGIN + INSERT INTO alert_escalation_policy_snapshots + (alert_id, step_number, step_max, step_delay, repeat, user_id, schedule_id) + SELECT alert_id, step_number, step_max, step_delay, repeat, user_id, schedule_id + FROM alert_escalation_policies pol + WHERE pol.alert_id = NEW.id; + + RETURN NEW; + END; +$BODY$ + LANGUAGE plpgsql VOLATILE; +-- +migrate StatementEnd + + +CREATE TRIGGER trg_snapshot_escalation_policy + AFTER INSERT + ON alerts + FOR EACH ROW + EXECUTE PROCEDURE fn_snapshot_escalation_policy(); \ No newline at end of file diff --git a/migrate/migrations/20171030130812-ev3-rotation-state.sql b/migrate/migrations/20171030130812-ev3-rotation-state.sql new file mode 100644 index 0000000000..2ea296991c --- /dev/null +++ b/migrate/migrations/20171030130812-ev3-rotation-state.sql @@ -0,0 +1,57 @@ + +-- +migrate Up + + +WITH rotation_details AS ( + SELECT + rotations.id, + rotations.start_time, + (((rotations.shift_length)::text || + CASE + WHEN (rotations.type = 'hourly') THEN ' hours' + WHEN (rotations.type = 'daily') THEN ' days' + ELSE ' weeks' + END))::interval AS shift, + (( + CASE + WHEN (type = 'hourly') THEN (date_part('epoch', ((now() at time zone time_zone) - (start_time at time zone time_zone)))::bigint / 3600) -- number of hours + WHEN (type = 'daily') THEN date_part('days', ((now() at time zone time_zone) - (start_time at time zone time_zone)))::bigint -- number of days + ELSE (date_part('days'::text, ((now() at time zone time_zone) - (start_time at time zone time_zone)))::bigint / 7) -- number of weeks + END / shift_length)) AS shift_number + FROM + rotations + WHERE start_time <= now() +), +p_count AS ( + SELECT + part.rotation_id, + count(part.id) AS count + FROM rotation_details rot + JOIN rotation_participants part ON part.rotation_id = rot.id + GROUP BY part.rotation_id +), +current_participant AS ( + SELECT + part.id, + part."position", + pc.rotation_id + FROM rotation_details rot + JOIN p_count pc ON 
pc.rotation_id = rot.id + JOIN rotation_participants part ON part.rotation_id = rot.id AND part."position" = (rot.shift_number % pc.count) +) +INSERT INTO rotation_state ( + rotation_id, + rotation_participant_id, + position, + shift_start +) +SELECT + cp.rotation_id, + cp.id, + cp.position, + rd.start_time + (rd.shift * rd.shift_number)::interval +FROM rotation_details rd +JOIN current_participant cp ON cp.rotation_id = rd.id; + +-- +migrate Down +TRUNCATE rotation_state; diff --git a/migrate/migrations/20171030130813-ev3-throttle-locks.sql b/migrate/migrations/20171030130813-ev3-throttle-locks.sql new file mode 100644 index 0000000000..3f2015cb61 --- /dev/null +++ b/migrate/migrations/20171030130813-ev3-throttle-locks.sql @@ -0,0 +1,42 @@ + +-- +migrate Up + +CREATE TABLE process_alerts ( + alert_id BIGINT PRIMARY KEY REFERENCES alerts(id) ON DELETE CASCADE, + client_id UUID, + deadline TIMESTAMP WITH TIME ZONE, + last_processed TIMESTAMP WITH TIME ZONE +); + +CREATE INDEX process_alerts_oldest_first ON process_alerts (last_processed ASC NULLS FIRST); + +CREATE TABLE process_rotations ( + rotation_id UUID PRIMARY KEY REFERENCES rotations(id) ON DELETE CASCADE, + client_id UUID, + deadline TIMESTAMP WITH TIME ZONE, + last_processed TIMESTAMP WITH TIME ZONE +); + +CREATE INDEX process_rotations_oldest_first ON process_rotations (last_processed ASC NULLS FIRST); + +ALTER TABLE alerts + ADD COLUMN last_processed TIMESTAMP WITH TIME ZONE; + +ALTER TABLE rotations + ADD COLUMN last_processed TIMESTAMP WITH TIME ZONE; + +UPDATE alerts +SET last_processed = last_action_time +FROM throttle +WHERE status != 'closed'; + +-- +migrate Down + +DROP TABLE process_alerts; +DROP TABLE process_rotations; + +ALTER TABLE alerts + DROP COLUMN last_processed; + +ALTER TABLE rotations + DROP COLUMN last_processed; diff --git a/migrate/migrations/20171030150519-ev3-remove-status-trigger.sql b/migrate/migrations/20171030150519-ev3-remove-status-trigger.sql new file mode 100644 index 
0000000000..a3ccdb7ce2 --- /dev/null +++ b/migrate/migrations/20171030150519-ev3-remove-status-trigger.sql @@ -0,0 +1,36 @@ + +-- +migrate Up + +DROP TRIGGER log_alert_status_changed ON alerts; +DROP FUNCTION log_alert_status_changed_insert(); + +-- +migrate Down + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION log_alert_status_changed_insert() RETURNS TRIGGER AS + $$ + BEGIN + IF NEW.status = 'closed'::enum_alert_status THEN + INSERT INTO alert_logs (alert_id, event, message) VALUES ( + NEW.id, 'closed'::enum_alert_log_event, 'Closed' + ); + ELSIF OLD.status = 'closed'::enum_alert_status THEN + INSERT INTO alert_logs (alert_id, event, message) VALUES ( + NEW.id, 'reopened'::enum_alert_log_event, 'Reopened as '||NEW.status::TEXT + ); + ELSE + INSERT INTO alert_logs (alert_id, event, message) VALUES ( + NEW.id, 'status_changed'::enum_alert_log_event, 'Status updated from '||OLD.status::TEXT||' to '||NEW.status::TEXT + ); + END IF; + RETURN NEW; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +CREATE TRIGGER log_alert_status_changed + AFTER UPDATE ON alerts + FOR EACH ROW + WHEN (OLD.status IS DISTINCT FROM NEW.status) + EXECUTE PROCEDURE log_alert_status_changed_insert(); diff --git a/migrate/migrations/20171126093536-schedule-rule-processing.sql b/migrate/migrations/20171126093536-schedule-rule-processing.sql new file mode 100644 index 0000000000..4e7c0edbb6 --- /dev/null +++ b/migrate/migrations/20171126093536-schedule-rule-processing.sql @@ -0,0 +1,27 @@ + +-- +migrate Up + +ALTER TABLE schedules + ADD COLUMN last_processed TIMESTAMP WITH TIME ZONE; + +ALTER TABLE schedule_rules + ADD COLUMN is_active BOOLEAN NOT NULL DEFAULT FALSE; + +CREATE TABLE process_schedules ( + schedule_id UUID PRIMARY KEY REFERENCES schedules (id) ON DELETE CASCADE, + client_id UUID, + deadline TIMESTAMP WITH TIME ZONE, + last_processed TIMESTAMP WITH TIME ZONE +); + +CREATE INDEX process_schedules_oldest_first ON process_schedules (last_processed ASC NULLS 
FIRST); + +-- +migrate Down + +DROP TABLE process_schedules; + +ALTER TABLE schedules + DROP COLUMN last_processed; + +ALTER TABLE schedule_rules + DROP COLUMN is_active; diff --git a/migrate/migrations/20171201104359-structured-alert-logs.sql b/migrate/migrations/20171201104359-structured-alert-logs.sql new file mode 100644 index 0000000000..5c7a5df7fd --- /dev/null +++ b/migrate/migrations/20171201104359-structured-alert-logs.sql @@ -0,0 +1,66 @@ + +-- +migrate Up + +CREATE TYPE enum_alert_log_subject_type AS ENUM ( + 'user', + 'integration_key' +); + +ALTER TABLE alert_logs + ADD COLUMN sub_type enum_alert_log_subject_type, + ADD COLUMN sub_user_id UUID REFERENCES users (id) ON DELETE SET NULL, + ADD COLUMN sub_integration_key_id UUID REFERENCES integration_keys (id) ON DELETE SET NULL, + ADD COLUMN sub_classifier TEXT NOT NULL DEFAULT '', + ADD COLUMN meta JSON, + ADD CONSTRAINT alert_logs_one_subject CHECK( + NOT (sub_user_id IS NOT NULL AND sub_integration_key_id IS NOT NULL) + ) +; + +ALTER TABLE alerts + ADD COLUMN created_at TIMESTAMP WITH TIME ZONE; + +UPDATE alerts alert +SET created_at = "timestamp" +FROM alert_logs log +WHERE + log.alert_id = alert.id AND + log."event" = 'created'; + +ALTER TABLE alerts + ALTER COLUMN created_at SET NOT NULL, + ALTER COLUMN created_at SET DEFAULT now(); + +DROP TRIGGER log_alert_creation ON alerts; +DROP FUNCTION log_alert_creation_insert(); + +-- +migrate Down + +ALTER TABLE alert_logs + DROP COLUMN sub_type, + DROP COLUMN sub_user_id, + DROP COLUMN sub_integration_key_id, + DROP COLUMN sub_classifier, + DROP COLUMN meta; + +ALTER TABLE alerts + DROP COLUMN created_at; + +DROP TYPE enum_alert_log_subject_type; + +-- +migrate StatementBegin +CREATE FUNCTION log_alert_creation_insert() RETURNS TRIGGER AS + $$ + BEGIN + INSERT INTO alert_logs (alert_id, event, message) VALUES ( + NEW.id, 'created'::enum_alert_log_event, 'Created via: '||NEW.source::TEXT + ); + RETURN NEW; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate 
StatementEnd + +CREATE TRIGGER log_alert_creation + AFTER INSERT ON alerts + FOR EACH ROW + EXECUTE PROCEDURE log_alert_creation_insert(); \ No newline at end of file diff --git a/migrate/migrations/20171201104433-add-alert-log-types.sql b/migrate/migrations/20171201104433-add-alert-log-types.sql new file mode 100644 index 0000000000..d48ed53c18 --- /dev/null +++ b/migrate/migrations/20171201104433-add-alert-log-types.sql @@ -0,0 +1,9 @@ + +-- +migrate Up notransaction + +ALTER TYPE enum_alert_log_event ADD VALUE IF NOT EXISTS 'acknowledged'; +ALTER TYPE enum_alert_log_event ADD VALUE IF NOT EXISTS 'policy_updated'; +ALTER TYPE enum_alert_log_event ADD VALUE IF NOT EXISTS 'duplicate_suppressed'; +ALTER TYPE enum_alert_log_event ADD VALUE IF NOT EXISTS 'escalation_request'; + +-- +migrate Down diff --git a/migrate/migrations/20171205125227-twilio-egress-sms-tracking.sql b/migrate/migrations/20171205125227-twilio-egress-sms-tracking.sql new file mode 100644 index 0000000000..3844e96254 --- /dev/null +++ b/migrate/migrations/20171205125227-twilio-egress-sms-tracking.sql @@ -0,0 +1,29 @@ + +-- +migrate Up + +CREATE TYPE enum_twilio_sms_status AS ENUM ( + 'unknown', -- in case twilio insists it doesn't exist when we ask + 'accepted', + 'queued', + 'sending', + 'sent', + 'receiving', + 'received', + 'delivered', + 'undelivered', + 'failed' +); + + +CREATE TABLE twilio_egress_sms_status ( + twilio_sid TEXT PRIMARY KEY, + last_status enum_twilio_sms_status NOT NULL, + sent_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + last_update TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + dest_number TEXT NOT NULL +); + +-- +migrate Down + +DROP TABLE twilio_egress_sms_status; +DROP TYPE enum_twilio_sms_status; diff --git a/migrate/migrations/20171211101108-twilio-egress-voice-tracking.sql b/migrate/migrations/20171211101108-twilio-egress-voice-tracking.sql new file mode 100644 index 0000000000..4bcd98eeae --- /dev/null +++ 
b/migrate/migrations/20171211101108-twilio-egress-voice-tracking.sql @@ -0,0 +1,31 @@ + + +-- +migrate Up + +CREATE TYPE enum_twilio_voice_status AS ENUM ( + 'unknown', -- in case twilio insists it doesn't exist when we ask + 'initiated', + 'queued', + 'ringing', + 'in-progress', + 'completed', + 'busy', + 'failed', + 'no-answer', + 'canceled' +); + + +CREATE TABLE twilio_egress_voice_status ( + twilio_sid TEXT PRIMARY KEY, + last_status enum_twilio_voice_status NOT NULL, + sent_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + last_update TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + dest_number TEXT NOT NULL, + last_sequence_number INT +); + +-- +migrate Down + +DROP TABLE twilio_egress_voice_status; +DROP TYPE enum_twilio_voice_status; diff --git a/migrate/migrations/20171213141802-add-alert-source-email.sql b/migrate/migrations/20171213141802-add-alert-source-email.sql new file mode 100644 index 0000000000..528533faba --- /dev/null +++ b/migrate/migrations/20171213141802-add-alert-source-email.sql @@ -0,0 +1,6 @@ + +-- +migrate Up notransaction + +ALTER TYPE enum_alert_source ADD VALUE IF NOT EXISTS 'email'; + +-- +migrate Down diff --git a/migrate/migrations/20171220113439-add-alert-dedup-keys.sql b/migrate/migrations/20171220113439-add-alert-dedup-keys.sql new file mode 100644 index 0000000000..73dfe6e364 --- /dev/null +++ b/migrate/migrations/20171220113439-add-alert-dedup-keys.sql @@ -0,0 +1,46 @@ + +-- +migrate Up + +LOCK alerts; + +ALTER TABLE alerts + ADD COLUMN dedup_key TEXT; + +UPDATE alerts +SET dedup_key = + concat( + 'auto:1:', + encode(digest(concat("description"), 'sha512'), 'hex') + ); + +ALTER TABLE alerts + ALTER COLUMN dedup_key SET NOT NULL; + +CREATE INDEX idx_dedup_alerts ON alerts (dedup_key); + +-- +migrate StatementBegin +CREATE FUNCTION fn_ensure_alert_dedup_key() RETURNS TRIGGER AS +$$ +BEGIN + IF NEW.dedup_key ISNULL THEN + NEW.dedup_key = + concat( + 'auto:1:', + encode(digest(concat(NEW."description"), 'sha512'), 'hex') + 
); + END IF; + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_ensure_alert_dedup_key BEFORE INSERT ON alerts +FOR EACH ROW EXECUTE PROCEDURE fn_ensure_alert_dedup_key(); + +-- +migrate Down + +DROP TRIGGER trg_ensure_alert_dedup_key ON alerts; +DROP FUNCTION fn_ensure_alert_dedup_key(); + +ALTER TABLE alerts + DROP COLUMN dedup_key; -- drops the dependant index diff --git a/migrate/migrations/20171221134500-limit-configuration.sql b/migrate/migrations/20171221134500-limit-configuration.sql new file mode 100644 index 0000000000..d10e0e893e --- /dev/null +++ b/migrate/migrations/20171221134500-limit-configuration.sql @@ -0,0 +1,22 @@ + +-- +migrate Up +CREATE TYPE enum_limit_type AS ENUM ( + 'notification_rules_per_user', + 'contact_methods_per_user', + 'ep_steps_per_policy', + 'ep_actions_per_step', + 'participants_per_rotation', + 'rules_per_schedule', + 'integration_keys_per_service', + 'unacked_alerts_per_service', + 'targets_per_schedule' +); +CREATE TABLE config_limits ( + id enum_limit_type PRIMARY KEY, + max INT NOT NULL DEFAULT -1 +); + +-- +migrate Down + +DROP TABLE config_limits; +DROP TYPE enum_limit_type; diff --git a/migrate/migrations/20171221138101-notification-rule-limit.sql b/migrate/migrations/20171221138101-notification-rule-limit.sql new file mode 100644 index 0000000000..ece9786e36 --- /dev/null +++ b/migrate/migrations/20171221138101-notification-rule-limit.sql @@ -0,0 +1,41 @@ + +-- +migrate Up + +CREATE INDEX idx_notification_rule_users ON user_notification_rules (user_id); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_notification_rule_limit() RETURNS trigger AS $$ +DECLARE + max_count INT := -1; + val_count INT := 0; +BEGIN + SELECT INTO max_count max + FROM config_limits + WHERE id = 'notification_rules_per_user'; + + IF max_count = -1 THEN + RETURN NEW; + END IF; + + SELECT INTO val_count COUNT(*) + FROM user_notification_rules + WHERE user_id = NEW.user_id; + + IF max_count != -1 
AND val_count > max_count THEN + RAISE 'limit exceeded' USING ERRCODE='check_violation', CONSTRAINT='notification_rules_per_user_limit', HINT='max='||max_count; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +CREATE CONSTRAINT TRIGGER trg_enforce_notification_rule_limit + AFTER INSERT ON user_notification_rules + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_notification_rule_limit(); + +-- +migrate Down + +DROP TRIGGER trg_enforce_notification_rule_limit ON user_notification_rules; +DROP FUNCTION fn_enforce_notification_rule_limit(); +DROP INDEX idx_notification_rule_users; diff --git a/migrate/migrations/20171221140906-contact-method-limit.sql b/migrate/migrations/20171221140906-contact-method-limit.sql new file mode 100644 index 0000000000..f92d402ebf --- /dev/null +++ b/migrate/migrations/20171221140906-contact-method-limit.sql @@ -0,0 +1,42 @@ + +-- +migrate Up + +CREATE INDEX idx_contact_method_users ON user_contact_methods (user_id); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_contact_method_limit() RETURNS trigger AS $$ +DECLARE + max_count INT := -1; + val_count INT := 0; +BEGIN + SELECT INTO max_count max + FROM config_limits + WHERE id = 'contact_methods_per_user'; + + IF max_count = -1 THEN + RETURN NEW; + END IF; + + SELECT INTO val_count COUNT(*) + FROM user_contact_methods + WHERE user_id = NEW.user_id; + + IF val_count > max_count THEN + RAISE 'limit exceeded' USING ERRCODE='check_violation', CONSTRAINT='contact_methods_per_user_limit', HINT='max='||max_count; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE CONSTRAINT TRIGGER trg_enforce_contact_method_limit + AFTER INSERT ON user_contact_methods + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_contact_method_limit(); + +-- +migrate Down + +DROP TRIGGER trg_enforce_contact_method_limit ON user_contact_methods; +DROP FUNCTION fn_enforce_contact_method_limit(); +DROP INDEX idx_contact_method_users; diff --git 
a/migrate/migrations/20171221142234-ep-step-limit.sql b/migrate/migrations/20171221142234-ep-step-limit.sql new file mode 100644 index 0000000000..ff6fd6f0e8 --- /dev/null +++ b/migrate/migrations/20171221142234-ep-step-limit.sql @@ -0,0 +1,42 @@ + +-- +migrate Up + +CREATE INDEX idx_ep_step_policies ON escalation_policy_steps (escalation_policy_id); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_ep_step_limit() RETURNS trigger AS $$ +DECLARE + max_count INT := -1; + val_count INT := 0; +BEGIN + SELECT INTO max_count max + FROM config_limits + WHERE id = 'ep_steps_per_policy'; + + IF max_count = -1 THEN + RETURN NEW; + END IF; + + SELECT INTO val_count COUNT(*) + FROM escalation_policy_steps + WHERE escalation_policy_id = NEW.escalation_policy_id; + + IF val_count > max_count THEN + RAISE 'limit exceeded' USING ERRCODE='check_violation', CONSTRAINT='ep_steps_per_policy_limit', HINT='max='||max_count; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE CONSTRAINT TRIGGER trg_enforce_ep_step_limit + AFTER INSERT ON escalation_policy_steps + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_ep_step_limit(); + +-- +migrate Down + +DROP TRIGGER trg_enforce_ep_step_limit ON escalation_policy_steps; +DROP FUNCTION fn_enforce_ep_step_limit(); +DROP INDEX idx_ep_step_policies; diff --git a/migrate/migrations/20171221142553-ep-step-action-limit.sql b/migrate/migrations/20171221142553-ep-step-action-limit.sql new file mode 100644 index 0000000000..9a8d260d1d --- /dev/null +++ b/migrate/migrations/20171221142553-ep-step-action-limit.sql @@ -0,0 +1,42 @@ + +-- +migrate Up + +CREATE INDEX idx_ep_action_steps ON escalation_policy_actions (escalation_policy_step_id); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_ep_step_action_limit() RETURNS trigger AS $$ +DECLARE + max_count INT := -1; + val_count INT := 0; +BEGIN + SELECT INTO max_count max + FROM config_limits + WHERE id = 'ep_actions_per_step'; + + IF max_count = -1 THEN 
+ RETURN NEW; + END IF; + + SELECT INTO val_count COUNT(*) + FROM escalation_policy_actions + WHERE escalation_policy_step_id = NEW.escalation_policy_step_id; + + IF val_count > max_count THEN + RAISE 'limit exceeded' USING ERRCODE='check_violation', CONSTRAINT='ep_actions_per_step_limit', HINT='max='||max_count; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE CONSTRAINT TRIGGER trg_enforce_ep_step_action_limit + AFTER INSERT ON escalation_policy_actions + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_ep_step_action_limit(); + +-- +migrate Down + +DROP TRIGGER trg_enforce_ep_step_action_limit ON escalation_policy_actions; +DROP FUNCTION fn_enforce_ep_step_action_limit(); +DROP INDEX idx_ep_action_steps; diff --git a/migrate/migrations/20171221150317-rotation-participant-limit.sql b/migrate/migrations/20171221150317-rotation-participant-limit.sql new file mode 100644 index 0000000000..c7193052cb --- /dev/null +++ b/migrate/migrations/20171221150317-rotation-participant-limit.sql @@ -0,0 +1,42 @@ + +-- +migrate Up + +CREATE INDEX idx_participant_rotation ON rotation_participants (rotation_id); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_rotation_participant_limit() RETURNS trigger AS $$ +DECLARE + max_count INT := -1; + val_count INT := 0; +BEGIN + SELECT INTO max_count max + FROM config_limits + WHERE id = 'participants_per_rotation'; + + IF max_count = -1 THEN + RETURN NEW; + END IF; + + SELECT INTO val_count COUNT(*) + FROM rotation_participants + WHERE rotation_id = NEW.rotation_id; + + IF val_count > max_count THEN + RAISE 'limit exceeded' USING ERRCODE='check_violation', CONSTRAINT='participants_per_rotation_limit', HINT='max='||max_count; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE CONSTRAINT TRIGGER trg_enforce_rotation_participant_limit + AFTER INSERT ON rotation_participants + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_rotation_participant_limit(); + +-- 
+migrate Down + +DROP TRIGGER trg_enforce_rotation_participant_limit ON rotation_participants; +DROP FUNCTION fn_enforce_rotation_participant_limit(); +DROP INDEX idx_participant_rotation; diff --git a/migrate/migrations/20171221150825-schedule-rule-limit.sql b/migrate/migrations/20171221150825-schedule-rule-limit.sql new file mode 100644 index 0000000000..ab1c55237d --- /dev/null +++ b/migrate/migrations/20171221150825-schedule-rule-limit.sql @@ -0,0 +1,42 @@ + +-- +migrate Up + +CREATE INDEX idx_rule_schedule ON schedule_rules (schedule_id); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_schedule_rule_limit() RETURNS trigger AS $$ +DECLARE + max_count INT := -1; + val_count INT := 0; +BEGIN + SELECT INTO max_count max + FROM config_limits + WHERE id = 'rules_per_schedule'; + + IF max_count = -1 THEN + RETURN NEW; + END IF; + + SELECT INTO val_count COUNT(*) + FROM schedule_rules + WHERE schedule_id = NEW.schedule_id; + + IF val_count > max_count THEN + RAISE 'limit exceeded' USING ERRCODE='check_violation', CONSTRAINT='rules_per_schedule_limit', HINT='max='||max_count; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE CONSTRAINT TRIGGER trg_enforce_schedule_rule_limit + AFTER INSERT ON schedule_rules + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_schedule_rule_limit(); + +-- +migrate Down + +DROP TRIGGER trg_enforce_schedule_rule_limit ON schedule_rules; +DROP FUNCTION fn_enforce_schedule_rule_limit(); +DROP INDEX idx_rule_schedule; diff --git a/migrate/migrations/20171221150955-integration-key-limit.sql b/migrate/migrations/20171221150955-integration-key-limit.sql new file mode 100644 index 0000000000..fb8b55d56e --- /dev/null +++ b/migrate/migrations/20171221150955-integration-key-limit.sql @@ -0,0 +1,42 @@ + +-- +migrate Up + +CREATE INDEX idx_integration_key_service ON integration_keys (service_id); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_integration_key_limit() RETURNS trigger AS $$ +DECLARE 
+ max_count INT := -1; + val_count INT := 0; +BEGIN + SELECT INTO max_count max + FROM config_limits + WHERE id = 'integration_keys_per_service'; + + IF max_count = -1 THEN + RETURN NEW; + END IF; + + SELECT INTO val_count COUNT(*) + FROM integration_keys + WHERE service_id = NEW.service_id; + + IF val_count > max_count THEN + RAISE 'limit exceeded' USING ERRCODE='check_violation', CONSTRAINT='integration_keys_per_service_limit', HINT='max='||max_count; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE CONSTRAINT TRIGGER trg_enforce_integration_key_limit + AFTER INSERT ON integration_keys + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_integration_key_limit(); + +-- +migrate Down + +DROP TRIGGER trg_enforce_integration_key_limit ON integration_keys; +DROP FUNCTION fn_enforce_integration_key_limit(); +DROP INDEX idx_integration_key_service; diff --git a/migrate/migrations/20171221151358-unacked-alert-limit.sql b/migrate/migrations/20171221151358-unacked-alert-limit.sql new file mode 100644 index 0000000000..b5ea2551d4 --- /dev/null +++ b/migrate/migrations/20171221151358-unacked-alert-limit.sql @@ -0,0 +1,42 @@ + +-- +migrate Up + +CREATE INDEX idx_unacked_alert_service ON alerts ("status", service_id); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_alert_limit() RETURNS trigger AS $$ +DECLARE + max_count INT := -1; + val_count INT := 0; +BEGIN + SELECT INTO max_count max + FROM config_limits + WHERE id = 'unacked_alerts_per_service'; + + IF max_count = -1 THEN + RETURN NEW; + END IF; + + SELECT INTO val_count COUNT(*) + FROM alerts + WHERE service_id = NEW.service_id AND "status" = 'triggered'; + + IF val_count > max_count THEN + RAISE 'limit exceeded' USING ERRCODE='check_violation', CONSTRAINT='unacked_alerts_per_service_limit', HINT='max='||max_count; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE CONSTRAINT TRIGGER trg_enforce_alert_limit + AFTER INSERT ON alerts + FOR 
EACH ROW EXECUTE PROCEDURE fn_enforce_alert_limit(); + +-- +migrate Down + +DROP TRIGGER trg_enforce_alert_limit ON alerts; +DROP FUNCTION fn_enforce_alert_limit(); +DROP INDEX idx_unacked_alert_service; diff --git a/migrate/migrations/20171221162356-case-insenstive-name-constraints.sql b/migrate/migrations/20171221162356-case-insenstive-name-constraints.sql new file mode 100644 index 0000000000..bc3b0172b7 --- /dev/null +++ b/migrate/migrations/20171221162356-case-insenstive-name-constraints.sql @@ -0,0 +1,17 @@ + +-- +migrate Up + +CREATE UNIQUE INDEX escalation_policies_name ON escalation_policies (lower("name")); +CREATE UNIQUE INDEX services_name ON services (lower("name")); +CREATE UNIQUE INDEX schedules_name ON schedules (lower("name")); +CREATE UNIQUE INDEX rotations_name ON rotations (lower("name")); +CREATE UNIQUE INDEX integration_keys_name_service_id ON integration_keys (lower("name"), service_id); + +-- +migrate Down + +DROP INDEX + escalation_policies_name, + services_name, + schedules_name, + rotations_name, + integration_keys_name_service_id; diff --git a/migrate/migrations/20180103113251-schedule-target-limit.sql b/migrate/migrations/20180103113251-schedule-target-limit.sql new file mode 100644 index 0000000000..6070f089b8 --- /dev/null +++ b/migrate/migrations/20180103113251-schedule-target-limit.sql @@ -0,0 +1,45 @@ + +-- +migrate Up + +CREATE INDEX idx_target_schedule ON schedule_rules (schedule_id, tgt_rotation_id, tgt_user_id); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_schedule_target_limit() RETURNS trigger AS $$ +DECLARE + max_count INT := -1; + val_count INT := 0; +BEGIN + SELECT INTO max_count max + FROM config_limits + WHERE id = 'targets_per_schedule'; + + IF max_count = -1 THEN + RETURN NEW; + END IF; + + SELECT INTO val_count COUNT(*) + FROM ( + SELECT DISTINCT tgt_user_id, tgt_rotation_id + FROM schedule_rules + WHERE schedule_id = NEW.schedule_id + ) as tmp; + + IF val_count > max_count THEN + RAISE 'limit exceeded' 
USING ERRCODE='check_violation', CONSTRAINT='targets_per_schedule_limit', HINT='max='||max_count; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE CONSTRAINT TRIGGER trg_enforce_schedule_target_limit + AFTER INSERT ON schedule_rules + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_schedule_target_limit(); + +-- +migrate Down + +DROP TRIGGER trg_enforce_schedule_target_limit ON schedule_rules; +DROP FUNCTION fn_enforce_schedule_target_limit(); +DROP INDEX idx_target_schedule; diff --git a/migrate/migrations/20180104114110-disable-process-alerts-queue.sql b/migrate/migrations/20180104114110-disable-process-alerts-queue.sql new file mode 100644 index 0000000000..ad7596af32 --- /dev/null +++ b/migrate/migrations/20180104114110-disable-process-alerts-queue.sql @@ -0,0 +1,31 @@ + +-- +migrate Up + +LOCK process_alerts; + +-- +migrate StatementBegin +CREATE FUNCTION fn_disable_inserts() RETURNS TRIGGER AS +$$ +BEGIN + RAISE EXCEPTION 'inserts are disabled on this table'; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- prevent new stuff from entering the queue +CREATE TRIGGER trg_disable_old_alert_processing +BEFORE INSERT ON process_alerts +EXECUTE PROCEDURE fn_disable_inserts(); + + +-- remove unclaimed stuff +DELETE FROM process_alerts +WHERE + client_id ISNULL OR + deadline ISNULL OR + deadline <= now(); + +-- +migrate Down + +DROP TRIGGER trg_disable_old_alert_processing ON process_alerts; +DROP FUNCTION fn_disable_inserts(); diff --git a/migrate/migrations/20180104122450-wait-alert-queue-finished.sql b/migrate/migrations/20180104122450-wait-alert-queue-finished.sql new file mode 100644 index 0000000000..40365a4477 --- /dev/null +++ b/migrate/migrations/20180104122450-wait-alert-queue-finished.sql @@ -0,0 +1,38 @@ + +-- +migrate Up notransaction + +-- +migrate StatementBegin +DO +$do$ +DECLARE + max_tries INT := 60; + c INT := 0; + n INT := 0; +BEGIN + + DELETE FROM process_alerts WHERE deadline isnull or deadline <= 
statement_timestamp(); + + SELECT COUNT(*) + FROM process_alerts + INTO n; + + LOOP + EXIT WHEN n = 0 OR c = max_tries; + + DELETE FROM process_alerts WHERE deadline isnull or deadline <= statement_timestamp(); + + SELECT COUNT(*), c+1 + FROM process_alerts + INTO n, c; + + PERFORM pg_sleep(1); + END LOOP; + + IF n != 0 THEN + RAISE EXCEPTION 'found active alert jobs'; + END IF; +END +$do$ +-- +migrate StatementEnd + +-- +migrate Down diff --git a/migrate/migrations/20180104123517-outgoing-messages.sql b/migrate/migrations/20180104123517-outgoing-messages.sql new file mode 100644 index 0000000000..f239677318 --- /dev/null +++ b/migrate/migrations/20180104123517-outgoing-messages.sql @@ -0,0 +1,70 @@ + +-- +migrate Up + +CREATE TYPE enum_outgoing_messages_status AS ENUM ( + 'pending', + 'sending', + 'queued_remotely', -- for use when sent, but we have status that a remote system has it queued + 'sent', + 'delivered', -- delivery confirmation + 'failed' +); +CREATE TYPE enum_outgoing_messages_type AS ENUM ( + 'alert_notification', + 'verification_message', + 'test_notification' +); + +CREATE TABLE outgoing_messages ( + id UUID NOT NULL PRIMARY KEY DEFAULT gen_random_uuid(), + message_type enum_outgoing_messages_type NOT NULL, + contact_method_id UUID NOT NULL REFERENCES user_contact_methods (id) ON DELETE CASCADE, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + last_status enum_outgoing_messages_status NOT NULL DEFAULT 'pending', + last_status_at TIMESTAMP WITH TIME ZONE DEFAULT now(), + status_details TEXT NOT NULL DEFAULT '', + fired_at TIMESTAMP WITH TIME ZONE, + sent_at TIMESTAMP WITH TIME ZONE, + retry_count INT NOT NULL DEFAULT 0, + next_retry_at TIMESTAMP WITH TIME ZONE, + sending_deadline TIMESTAMP WITH TIME ZONE, + user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE, + + alert_id BIGINT REFERENCES alerts (id) ON DELETE CASCADE, + cycle_id UUID REFERENCES notification_policy_cycles (id) ON DELETE CASCADE, + service_id UUID REFERENCES 
services (id) ON DELETE CASCADE, + escalation_policy_id UUID REFERENCES escalation_policies (id) ON DELETE CASCADE, + + CONSTRAINT om_pending_no_fired_no_sent CHECK( + last_status != 'pending' or (fired_at isnull and sent_at isnull) + ), + CONSTRAINT om_sending_fired_no_sent CHECK( + last_status != 'sending' or (fired_at notnull and sent_at isnull) + ), + CONSTRAINT om_processed_no_fired_sent CHECK( + last_status in ('pending','sending','failed') or + (fired_at isnull and sent_at notnull) + ), + CONSTRAINT om_alert_svc_ep_ids CHECK( + message_type != 'alert_notification' or ( + alert_id notnull and + service_id notnull and + escalation_policy_id notnull + ) + ), + CONSTRAINT om_sending_deadline_reqd CHECK( + last_status != 'sending' or sending_deadline notnull + ) +); + +CREATE INDEX idx_om_alert_sent ON outgoing_messages (alert_id, sent_at); +CREATE INDEX idx_om_ep_sent ON outgoing_messages (escalation_policy_id, sent_at); +CREATE INDEX idx_om_service_sent ON outgoing_messages (service_id, sent_at); +CREATE INDEX idx_om_cm_sent ON outgoing_messages (contact_method_id, sent_at); +CREATE INDEX idx_om_user_sent ON outgoing_messages (user_id, sent_at); + +-- +migrate Down + +DROP TABLE outgoing_messages; +DROP TYPE enum_outgoing_messages_type; +DROP TYPE enum_outgoing_messages_status; diff --git a/migrate/migrations/20180104124640-ncycle-tick.sql b/migrate/migrations/20180104124640-ncycle-tick.sql new file mode 100644 index 0000000000..109dccc3ea --- /dev/null +++ b/migrate/migrations/20180104124640-ncycle-tick.sql @@ -0,0 +1,73 @@ + +-- +migrate Up + +LOCK + notification_policy_cycles, + notification_logs; + +ALTER TABLE notification_policy_cycles + ADD COLUMN last_tick TIMESTAMP WITH TIME ZONE; + + +-- add sent messages for active cycles +INSERT INTO outgoing_messages ( + id, + message_type, + contact_method_id, + created_at, + last_status, + last_status_at, + status_details, + sent_at, + alert_id, + cycle_id, + user_id, + service_id, + escalation_policy_id +) 
+SELECT + log.id, + cast('alert_notification' as enum_outgoing_messages_type), + log.contact_method_id, + log.process_timestamp, + cast(case when log.completed then 'sent' else 'pending' end as enum_outgoing_messages_status), + log.process_timestamp, + 'migrated', + case when log.completed then process_timestamp else null end, + log.alert_id, + cycle.id, + cm.user_id, + a.service_id, + svc.escalation_policy_id +FROM notification_logs log +JOIN notification_policy_cycles cycle ON cycle.alert_id = log.alert_id AND cycle.checked AND cycle.started_at <= log.process_timestamp +JOIN user_contact_methods cm ON cm.id = log.contact_method_id AND cm.user_id = cycle.user_id +JOIN alerts a ON a.id = log.alert_id +JOIN services svc ON svc.id = a.service_id +ORDER BY process_timestamp DESC +ON CONFLICT DO NOTHING +; + + +with last_sent as ( + select distinct + alert_id, + cm.user_id, + max(process_timestamp) + from notification_logs log + join user_contact_methods cm on cm.id = log.contact_method_id + where log.completed + group by alert_id, cm.user_id +) +update notification_policy_cycles cycle +set last_tick = last_sent.max +from last_sent +where + last_sent.alert_id = cycle.alert_id and + last_sent.user_id = cycle.user_id; + +-- +migrate Down + +DELETE FROM outgoing_messages WHERE status_details = 'migrated'; +ALTER TABLE notification_policy_cycles + DROP COLUMN last_tick; diff --git a/migrate/migrations/20180104125444-twilio-sms-multiple-callbacks.sql b/migrate/migrations/20180104125444-twilio-sms-multiple-callbacks.sql new file mode 100644 index 0000000000..423ba72218 --- /dev/null +++ b/migrate/migrations/20180104125444-twilio-sms-multiple-callbacks.sql @@ -0,0 +1,12 @@ + +-- +migrate Up +ALTER TABLE twilio_sms_callbacks + DROP CONSTRAINT twilio_sms_callbacks_phone_number_twilio_sid_key, + DROP CONSTRAINT twilio_sms_callbacks_pkey; + +-- +migrate Down +ALTER TABLE twilio_sms_callbacks + ADD CONSTRAINT twilio_sms_callbacks_phone_number_twilio_sid_key UNIQUE (phone_number, 
twilio_sid), + ADD CONSTRAINT twilio_sms_callbacks_pkey PRIMARY KEY (phone_number, code); + + \ No newline at end of file diff --git a/migrate/migrations/20180109114058-email-integration-key.sql b/migrate/migrations/20180109114058-email-integration-key.sql new file mode 100644 index 0000000000..0a1bf78758 --- /dev/null +++ b/migrate/migrations/20180109114058-email-integration-key.sql @@ -0,0 +1,6 @@ + +-- +migrate Up notransaction + +ALTER TYPE enum_integration_keys_type ADD VALUE IF NOT EXISTS 'email'; + +-- +migrate Down diff --git a/migrate/migrations/20180110155110-alert-unique-dedup-service.sql b/migrate/migrations/20180110155110-alert-unique-dedup-service.sql new file mode 100644 index 0000000000..bd6bd02471 --- /dev/null +++ b/migrate/migrations/20180110155110-alert-unique-dedup-service.sql @@ -0,0 +1,57 @@ + +-- +migrate Up + +ALTER TABLE alerts + ALTER COLUMN dedup_key DROP NOT NULL; + +UPDATE alerts +SET dedup_key = NULL +WHERE status = 'closed'; + +ALTER TABLE alerts + ADD CONSTRAINT dedup_key_only_for_open_alerts CHECK((status = 'closed') = (dedup_key isnull)); + +CREATE UNIQUE INDEX idx_no_alert_duplicates ON alerts (service_id, dedup_key); + +-- +migrate StatementBegin +CREATE FUNCTION fn_clear_dedup_on_close() RETURNS trigger AS $$ +BEGIN + NEW.dedup_key = NULL; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +DROP TRIGGER trg_ensure_alert_dedup_key ON ALERTS; +CREATE TRIGGER trg_ensure_alert_dedup_key BEFORE INSERT ON alerts +FOR EACH ROW +WHEN (NEW.status != 'closed') +EXECUTE PROCEDURE fn_ensure_alert_dedup_key(); + +CREATE TRIGGER trg_clear_dedup_on_close BEFORE UPDATE ON alerts +FOR EACH ROW +WHEN (NEW.status != OLD.status AND NEW.status = 'closed') +EXECUTE PROCEDURE fn_clear_dedup_on_close(); + +-- +migrate Down + +DROP INDEX idx_no_alert_duplicates; +ALTER TABLE alerts + DROP CONSTRAINT dedup_key_only_for_open_alerts; + +UPDATE alerts +SET dedup_key = concat( + 'auto:1:', + encode(digest(concat("description"), 'sha512'), 
'hex') + ) +WHERE dedup_key ISNULL; + +ALTER TABLE alerts + ALTER COLUMN dedup_key SET NOT NULL; + +DROP TRIGGER trg_clear_dedup_on_close ON alerts; +DROP FUNCTION fn_clear_dedup_on_close(); + +DROP TRIGGER trg_ensure_alert_dedup_key ON ALERTS; +CREATE TRIGGER trg_ensure_alert_dedup_key BEFORE INSERT ON alerts +FOR EACH ROW EXECUTE PROCEDURE fn_ensure_alert_dedup_key(); diff --git a/migrate/migrations/20180117110856-status-update-message-type.sql b/migrate/migrations/20180117110856-status-update-message-type.sql new file mode 100644 index 0000000000..48310b44d5 --- /dev/null +++ b/migrate/migrations/20180117110856-status-update-message-type.sql @@ -0,0 +1,7 @@ + +-- +migrate Up notransaction + +ALTER TYPE enum_outgoing_messages_type + ADD VALUE IF NOT EXISTS 'alert_status_update'; + +-- +migrate Down diff --git a/migrate/migrations/20180117115123-alert-status-updates.sql b/migrate/migrations/20180117115123-alert-status-updates.sql new file mode 100644 index 0000000000..21a817e41f --- /dev/null +++ b/migrate/migrations/20180117115123-alert-status-updates.sql @@ -0,0 +1,86 @@ + +-- +migrate Up + +ALTER TABLE users + ADD COLUMN alert_status_log_contact_method_id UUID + REFERENCES user_contact_methods (id) ON DELETE SET NULL; + +ALTER TABLE outgoing_messages + ADD COLUMN alert_log_id BIGINT REFERENCES alert_logs (id) ON DELETE CASCADE, + ADD CONSTRAINT om_status_update_log_id CHECK( + message_type != 'alert_status_update' OR + alert_log_id NOTNULL + ); + +CREATE TABLE user_last_alert_log ( + user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE, + alert_id BIGINT NOT NULL REFERENCES alerts (id) ON DELETE CASCADE, + log_id BIGINT NOT NULL REFERENCES alert_logs (id) ON DELETE CASCADE, + next_log_id BIGINT NOT NULL REFERENCES alert_logs (id) ON DELETE CASCADE, + + PRIMARY KEY (user_id, alert_id) +); + +-- +migrate StatementBegin +CREATE FUNCTION fn_insert_user_last_alert_log() RETURNS trigger AS $$ +BEGIN + + INSERT INTO user_last_alert_log (user_id, alert_id, 
log_id, next_log_id) + VALUES (NEW.sub_user_id, NEW.alert_id, NEW.id, NEW.id) + ON CONFLICT DO NOTHING; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE FUNCTION fn_update_user_last_alert_log() RETURNS trigger AS $$ +BEGIN + + UPDATE user_last_alert_log last + SET next_log_id = NEW.id + WHERE + last.alert_id = NEW.alert_id AND + NEW.id > last.next_log_id; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +CREATE TRIGGER trg_insert_alert_logs_user_last_alert +AFTER INSERT +ON alert_logs +FOR EACH ROW +WHEN (NEW.event = 'notification_sent') +EXECUTE PROCEDURE fn_insert_user_last_alert_log(); + +CREATE TRIGGER trg_insert_alert_logs_user_last_alert_update +AFTER INSERT +ON alert_logs +FOR EACH ROW +WHEN (NEW.event IN ('acknowledged', 'closed')) +EXECUTE PROCEDURE fn_update_user_last_alert_log(); + +CREATE INDEX idx_alert_logs_alert_event ON alert_logs (alert_id, event); + +-- +migrate Down + +ALTER TABLE users + DROP COLUMN alert_status_log_contact_method_id; + +DELETE FROM outgoing_messages WHERE message_type = 'alert_status_update'; + +ALTER TABLE outgoing_messages + DROP COLUMN alert_log_id; + +DROP TABLE user_last_alert_log; + +DROP INDEX idx_alert_logs_alert_event; + +DROP TRIGGER trg_insert_alert_logs_user_last_alert_update ON alert_logs; +DROP TRIGGER trg_insert_alert_logs_user_last_alert ON alert_logs; + +DROP FUNCTION fn_update_user_last_alert_log(); +DROP FUNCTION fn_insert_user_last_alert_log(); diff --git a/migrate/migrations/20180118112019-restrict-cm-to-same-user.sql b/migrate/migrations/20180118112019-restrict-cm-to-same-user.sql new file mode 100644 index 0000000000..4bea3dc61f --- /dev/null +++ b/migrate/migrations/20180118112019-restrict-cm-to-same-user.sql @@ -0,0 +1,68 @@ + +-- +migrate Up + + + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_status_update_same_user() RETURNS trigger AS $$ +DECLARE + _cm_user_id UUID; +BEGIN + IF 
NEW.alert_status_log_contact_method_id ISNULL THEN + RETURN NEW; + END IF; + + SELECT INTO _cm_user_id user_id + FROM user_contact_methods + WHERE id = NEW.alert_status_log_contact_method_id; + + IF NEW.id != _cm_user_id THEN + RAISE 'wrong user_id' USING ERRCODE='check_violation', CONSTRAINT='alert_status_user_id_match'; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE FUNCTION fn_notification_rule_same_user() RETURNS trigger AS $$ +DECLARE + _cm_user_id UUID; +BEGIN + SELECT INTO _cm_user_id user_id + FROM user_contact_methods + WHERE id = NEW.contact_method_id; + + IF NEW.user_id != _cm_user_id THEN + RAISE 'wrong user_id' USING ERRCODE='check_violation', CONSTRAINT='notification_rule_user_id_match'; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE TRIGGER trg_enforce_status_update_same_user + BEFORE INSERT OR UPDATE ON users + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_status_update_same_user(); + + +LOCK user_notification_rules; + +CREATE TRIGGER trg_notification_rule_same_user + BEFORE INSERT OR UPDATE ON user_notification_rules + FOR EACH ROW EXECUTE PROCEDURE fn_notification_rule_same_user(); + +DELETE FROM user_notification_rules r +USING user_contact_methods cm +WHERE cm.id = r.contact_method_id AND cm.user_id != r.user_id; + +-- +migrate Down + +DROP TRIGGER IF EXISTS trg_notification_rule_same_user ON user_notification_rules; +DROP TRIGGER IF EXISTS trg_enforce_status_update_same_user ON users; + +DROP FUNCTION IF EXISTS fn_notification_rule_same_user(); +DROP FUNCTION IF EXISTS fn_enforce_status_update_same_user(); diff --git a/migrate/migrations/20180126162030-heartbeat-auth-log-subject-type.sql b/migrate/migrations/20180126162030-heartbeat-auth-log-subject-type.sql new file mode 100644 index 0000000000..c8c557a580 --- /dev/null +++ b/migrate/migrations/20180126162030-heartbeat-auth-log-subject-type.sql @@ -0,0 +1,6 @@ + +-- +migrate Up 
notransaction + +ALTER TYPE enum_alert_log_subject_type ADD VALUE IF NOT EXISTS 'heartbeat_monitor'; + +-- +migrate Down diff --git a/migrate/migrations/20180126162093-heartbeats.sql b/migrate/migrations/20180126162093-heartbeats.sql new file mode 100644 index 0000000000..5e513b3ea2 --- /dev/null +++ b/migrate/migrations/20180126162093-heartbeats.sql @@ -0,0 +1,24 @@ + +-- +migrate Up + +CREATE TYPE enum_heartbeat_state AS ENUM ( + 'inactive', + 'healthy', + 'unhealthy' +); + +CREATE TABLE heartbeat_monitors ( + id UUID PRIMARY KEY, + name TEXT NOT NULL, + service_id UUID NOT NULL REFERENCES services(id), + heartbeat_interval INTERVAL NOT NULL, + last_state enum_heartbeat_state NOT NULL DEFAULT 'inactive', + last_heartbeat TIMESTAMP WITH TIME ZONE +); + +CREATE UNIQUE INDEX heartbeat_monitor_name_service_id ON heartbeat_monitors (lower("name"), service_id); + +-- +migrate Down + +DROP TABLE heartbeat_monitors; +DROP TYPE enum_heartbeat_state; diff --git a/migrate/migrations/20180126162144-heartbeat-auth-log-data.sql b/migrate/migrations/20180126162144-heartbeat-auth-log-data.sql new file mode 100644 index 0000000000..a247ac8fd2 --- /dev/null +++ b/migrate/migrations/20180126162144-heartbeat-auth-log-data.sql @@ -0,0 +1,19 @@ + +-- +migrate Up + +ALTER TABLE alert_logs + ADD COLUMN sub_hb_monitor_id UUID REFERENCES heartbeat_monitors (id) ON DELETE SET NULL, + DROP CONSTRAINT alert_logs_one_subject, + ADD CONSTRAINT alert_logs_one_subject CHECK( + NOT (sub_user_id NOTNULL AND sub_integration_key_id NOTNULL AND sub_hb_monitor_id NOTNULL) + ) +; + +-- +migrate Down + +ALTER TABLE alert_logs + DROP COLUMN sub_hb_monitor_id, + ADD CONSTRAINT alert_logs_one_subject CHECK( + NOT (sub_user_id NOTNULL AND sub_integration_key_id NOTNULL) + ) +; diff --git a/migrate/migrations/20180130123755-heartbeat-limit-key.sql b/migrate/migrations/20180130123755-heartbeat-limit-key.sql new file mode 100644 index 0000000000..52f9e823fd --- /dev/null +++ 
b/migrate/migrations/20180130123755-heartbeat-limit-key.sql @@ -0,0 +1,5 @@ + +-- +migrate Up notransaction +ALTER TYPE enum_limit_type ADD VALUE IF NOT EXISTS 'heartbeat_monitors_per_service'; + +-- +migrate Down diff --git a/migrate/migrations/20180130123852-heartbeat-limit.sql b/migrate/migrations/20180130123852-heartbeat-limit.sql new file mode 100644 index 0000000000..1ec851b0ff --- /dev/null +++ b/migrate/migrations/20180130123852-heartbeat-limit.sql @@ -0,0 +1,42 @@ + +-- +migrate Up + +CREATE INDEX idx_heartbeat_monitor_service ON heartbeat_monitors (service_id); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_heartbeat_limit() RETURNS trigger AS $$ +DECLARE + max_count INT := -1; + val_count INT := 0; +BEGIN + SELECT INTO max_count max + FROM config_limits + WHERE id = 'heartbeat_monitors_per_service'; + + IF max_count = -1 THEN + RETURN NEW; + END IF; + + SELECT INTO val_count COUNT(*) + FROM heartbeat_monitors + WHERE service_id = NEW.service_id; + + IF val_count > max_count THEN + RAISE 'limit exceeded' USING ERRCODE='check_violation', CONSTRAINT='heartbeat_monitors_per_service_limit', HINT='max='||max_count; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE CONSTRAINT TRIGGER trg_enforce_heartbeat_monitor_limit + AFTER INSERT ON heartbeat_monitors + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_heartbeat_limit(); + +-- +migrate Down + +DROP TRIGGER trg_enforce_heartbeat_monitor_limit ON heartbeat_monitors; +DROP FUNCTION fn_enforce_heartbeat_limit(); +DROP INDEX idx_heartbeat_monitor_service; diff --git a/migrate/migrations/20180201180221-add-verification-code.sql b/migrate/migrations/20180201180221-add-verification-code.sql new file mode 100644 index 0000000000..138df149dd --- /dev/null +++ b/migrate/migrations/20180201180221-add-verification-code.sql @@ -0,0 +1,28 @@ + +-- +migrate Up +CREATE TABLE user_verification_codes ( + id UUID PRIMARY KEY, + user_id UUID NOT NULL REFERENCES users(id), + 
contact_method_value TEXT NOT NULL, + code int NOT NULL, + expires_at TIMESTAMP WITH TIME ZONE NOT NULL, + send_to UUID REFERENCES user_contact_methods(id), + UNIQUE(user_id, contact_method_value) +); + +ALTER TABLE user_contact_methods + ADD COLUMN last_test_verify_at TIMESTAMP WITH TIME ZONE; + +ALTER TABLE outgoing_messages + ADD COLUMN user_verification_code_id UUID REFERENCES user_verification_codes(id) ON DELETE CASCADE, + ADD CONSTRAINT verify_needs_id CHECK(message_type != 'verification_message' OR user_verification_code_id NOTNULL); + +-- +migrate Down + +ALTER TABLE user_contact_methods + DROP COLUMN last_test_verify_at; + +ALTER TABLE outgoing_messages + DROP COLUMN user_verification_code_id; + +DROP TABLE user_verification_codes; diff --git a/migrate/migrations/20180207113632-ep-step-number-consistency.sql b/migrate/migrations/20180207113632-ep-step-number-consistency.sql new file mode 100644 index 0000000000..51fa0a115f --- /dev/null +++ b/migrate/migrations/20180207113632-ep-step-number-consistency.sql @@ -0,0 +1,137 @@ + +-- +migrate Up + + +-- read/write lock the table so once unlocked it will be both fixed and triggers in place to ensure future consistency. 
+LOCK escalation_policy_steps; + +-- Fix any existing discrepencies +UPDATE escalation_policy_steps step +SET step_number = computed.step_number +FROM ( + SELECT + id, + row_number() OVER (PARTITION BY escalation_policy_id ORDER BY step_number) - 1 AS step_number + FROM escalation_policy_steps +) computed +WHERE + step.id = computed.id AND + step.step_number != computed.step_number; + +-- +migrate StatementBegin +CREATE FUNCTION fn_inc_ep_step_number_on_insert() RETURNS trigger AS $$ +BEGIN + LOCK escalation_policy_steps IN EXCLUSIVE MODE; + + SELECT count(*) + INTO NEW.step_number + FROM escalation_policy_steps + WHERE escalation_policy_id = NEW.escalation_policy_id; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE FUNCTION fn_decr_ep_step_number_on_delete() RETURNS trigger AS $$ +BEGIN + LOCK escalation_policy_steps IN EXCLUSIVE MODE; + + UPDATE escalation_policy_steps + SET step_number = step_number - 1 + WHERE + escalation_policy_id = OLD.escalation_policy_id AND + step_number > OLD.step_number; + + RETURN OLD; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_ep_step_number_no_gaps() RETURNS trigger AS $$ +DECLARE + max_pos INT := -1; + step_count INT := 0; +BEGIN + IF NEW.escalation_policy_id != OLD.escalation_policy_id THEN + RAISE 'must not change escalation_policy_id of existing step'; + END IF; + + SELECT max(step_number), count(*) + INTO max_pos, step_count + FROM escalation_policy_steps + WHERE escalation_policy_id = NEW.escalation_policy_id; + + IF max_pos >= step_count THEN + RAISE 'must not have gap in step_numbers'; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +-- ensure updates don't cause gaps +CREATE CONSTRAINT TRIGGER trg_ep_step_number_no_gaps + AFTER UPDATE + ON escalation_policy_steps + INITIALLY DEFERRED + FOR EACH ROW + EXECUTE PROCEDURE fn_enforce_ep_step_number_no_gaps(); + +DROP 
TRIGGER incr_escalation_policy_steps_on_delete ON escalation_policy_steps; +DROP TRIGGER set_escalation_policy_step_on_insert ON escalation_policy_steps; +DROP FUNCTION set_escalation_policy_step(); +DROP FUNCTION incr_escalation_policy_steps_on_delete(); + +CREATE TRIGGER trg_inc_ep_step_number_on_insert + BEFORE INSERT + ON escalation_policy_steps + FOR EACH ROW + EXECUTE PROCEDURE fn_inc_ep_step_number_on_insert(); + +CREATE TRIGGER trg_decr_ep_step_number_on_delete + BEFORE DELETE + ON escalation_policy_steps + FOR EACH ROW + EXECUTE PROCEDURE fn_decr_ep_step_number_on_delete(); + +-- +migrate Down + +DROP TRIGGER trg_ep_step_number_no_gaps ON escalation_policy_steps; +DROP FUNCTION fn_enforce_ep_step_number_no_gaps(); + +-- +migrate StatementBegin +CREATE FUNCTION set_escalation_policy_step() RETURNS trigger +LANGUAGE plpgsql +AS $$ +BEGIN +SELECT count(step_number) INTO NEW.step_number FROM escalation_policy_steps WHERE escalation_policy_id = NEW.escalation_policy_id; +RETURN NEW; +END; +$$; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE FUNCTION incr_escalation_policy_steps_on_delete() RETURNS trigger +LANGUAGE plpgsql +AS $$ +BEGIN +UPDATE escalation_policy_steps +SET step_number = step_number-1 +WHERE escalation_policy_id = OLD.escalation_policy_id +AND step_number > OLD.step_number; + +RETURN OLD; +END; +$$; +-- +migrate StatementEnd + +CREATE TRIGGER incr_escalation_policy_steps_on_delete AFTER DELETE ON escalation_policy_steps FOR EACH ROW EXECUTE PROCEDURE incr_escalation_policy_steps_on_delete(); +CREATE TRIGGER set_escalation_policy_step_on_insert BEFORE INSERT ON escalation_policy_steps FOR EACH ROW EXECUTE PROCEDURE set_escalation_policy_step(); +DROP TRIGGER trg_decr_ep_step_number_on_delete ON escalation_policy_steps; +DROP TRIGGER trg_inc_ep_step_number_on_insert ON escalation_policy_steps; +DROP FUNCTION fn_decr_ep_step_number_on_delete(); +DROP FUNCTION fn_inc_ep_step_number_on_insert(); diff --git 
a/migrate/migrations/20180207124220-rotation-participant-position-consistency.sql b/migrate/migrations/20180207124220-rotation-participant-position-consistency.sql new file mode 100644 index 0000000000..588904e427 --- /dev/null +++ b/migrate/migrations/20180207124220-rotation-participant-position-consistency.sql @@ -0,0 +1,106 @@ + +-- +migrate Up + + +-- read/write lock the table so once unlocked it will be both fixed and triggers in place to ensure future consistency. +LOCK rotation_participants; + +-- Fix any existing discrepencies +UPDATE rotation_participants part +SET position = computed.position +FROM ( + SELECT + id, + row_number() OVER (PARTITION BY rotation_id ORDER BY position) - 1 AS position + FROM rotation_participants +) computed +WHERE + part.id = computed.id AND + part.position != computed.position; + + +-- +migrate StatementBegin +CREATE FUNCTION fn_inc_rot_part_position_on_insert() RETURNS trigger AS $$ +BEGIN + LOCK rotation_participants IN EXCLUSIVE MODE; + + SELECT count(*) + INTO NEW.position + FROM rotation_participants + WHERE rotation_id = NEW.rotation_id; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE FUNCTION fn_decr_rot_part_position_on_delete() RETURNS trigger AS $$ +BEGIN + LOCK rotation_participants IN EXCLUSIVE MODE; + + UPDATE rotation_participants + SET position = position - 1 + WHERE + rotation_id = OLD.rotation_id AND + position > OLD.position; + + RETURN OLD; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_rot_part_position_no_gaps() RETURNS trigger AS $$ +DECLARE + max_pos INT := -1; + part_count INT := 0; +BEGIN + IF NEW.rotation_id != OLD.rotation_id THEN + RAISE 'must not change rotation_id of existing participant'; + END IF; + + SELECT max(position), count(*) + INTO max_pos, part_count + FROM rotation_participants + WHERE rotation_id = NEW.rotation_id; + + IF max_pos >= part_count THEN + RAISE 'must 
not have gap in participant positions'; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +-- ensure updates don't cause gaps +CREATE CONSTRAINT TRIGGER trg_enforce_rot_part_position_no_gaps + AFTER UPDATE + ON rotation_participants + INITIALLY DEFERRED + FOR EACH ROW + EXECUTE PROCEDURE fn_enforce_rot_part_position_no_gaps(); + + +CREATE TRIGGER trg_inc_rot_part_position_on_insert + BEFORE INSERT + ON rotation_participants + FOR EACH ROW + EXECUTE PROCEDURE fn_inc_rot_part_position_on_insert(); + +CREATE TRIGGER trg_decr_rot_part_position_on_delete + BEFORE DELETE + ON rotation_participants + FOR EACH ROW + EXECUTE PROCEDURE fn_decr_rot_part_position_on_delete(); + +-- +migrate Down + +DROP TRIGGER trg_enforce_rot_part_position_no_gaps ON rotation_participants; +DROP FUNCTION fn_enforce_rot_part_position_no_gaps(); + +DROP TRIGGER trg_decr_rot_part_position_on_delete ON rotation_participants; +DROP TRIGGER trg_inc_rot_part_position_on_insert ON rotation_participants; +DROP FUNCTION fn_inc_rot_part_position_on_insert(); +DROP FUNCTION fn_decr_rot_part_position_on_delete(); diff --git a/migrate/migrations/20180216104945-alerts-split-summary-details.sql b/migrate/migrations/20180216104945-alerts-split-summary-details.sql new file mode 100644 index 0000000000..9eaec5ad6a --- /dev/null +++ b/migrate/migrations/20180216104945-alerts-split-summary-details.sql @@ -0,0 +1,63 @@ + +-- +migrate Up + +LOCK alerts; + +ALTER TABLE alerts + ADD COLUMN summary TEXT, + ADD COLUMN details TEXT DEFAULT '' +; + +UPDATE alerts +SET + summary = split_part(description, chr(10), 1), + details = ( + case when strpos(description, chr(10)) > 0 then + substr(description, strpos(description, chr(10))+1) + else + '' + end + ) +; + +ALTER TABLE alerts + ALTER COLUMN summary SET NOT NULL, + ALTER COLUMN details SET NOT NULL +; + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_alerts_ensure_description() RETURNS TRIGGER AS +$$ +BEGIN + IF NEW.description 
ISNULL THEN + NEW.description = TRIM(TRAILING chr(10) FROM NEW.summary || chr(10) || NEW.details); + END IF; + + IF NEW.summary ISNULL THEN + NEW.summary = split_part(NEW.description, chr(10), 1); + NEW.details = ( + case when strpos(NEW.description, chr(10)) > 0 then + substr(NEW.description, strpos(NEW.description, chr(10))+1) + else + '' + end + ); + END IF; + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_00_ensure_description BEFORE INSERT ON alerts +FOR EACH ROW EXECUTE PROCEDURE fn_alerts_ensure_description(); + +-- +migrate Down + +LOCK alerts; + +DROP TRIGGER trg_00_ensure_description ON alerts; +DROP FUNCTION fn_alerts_ensure_description(); + +ALTER TABLE alerts + DROP COLUMN summary, + DROP COLUMN details; diff --git a/migrate/migrations/20180228103159-schedule-overrides-limit-key.sql b/migrate/migrations/20180228103159-schedule-overrides-limit-key.sql new file mode 100644 index 0000000000..f91c841714 --- /dev/null +++ b/migrate/migrations/20180228103159-schedule-overrides-limit-key.sql @@ -0,0 +1,5 @@ + +-- +migrate Up notransaction +ALTER TYPE enum_limit_type ADD VALUE IF NOT EXISTS 'user_overrides_per_schedule'; + +-- +migrate Down diff --git a/migrate/migrations/20180228111204-schedule-overrides.sql b/migrate/migrations/20180228111204-schedule-overrides.sql new file mode 100644 index 0000000000..579fab5d73 --- /dev/null +++ b/migrate/migrations/20180228111204-schedule-overrides.sql @@ -0,0 +1,93 @@ + +-- +migrate Up + +CREATE TABLE user_overrides ( + id UUID PRIMARY KEY, + + start_time TIMESTAMP WITH TIME ZONE NOT NULL, + end_time TIMESTAMP WITH TIME ZONE NOT NULL, + CHECK(end_time > start_time), -- needs name + CHECK(end_time > now()), + + add_user_id UUID REFERENCES users (id) ON DELETE CASCADE, + remove_user_id UUID REFERENCES users (id) ON DELETE CASCADE, + CHECK(COALESCE(add_user_id, remove_user_id) NOTNULL), + CHECK(add_user_id != remove_user_id), + + tgt_schedule_id UUID NOT NULL REFERENCES schedules 
(id) ON DELETE CASCADE +); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_user_overide_no_conflict() RETURNS trigger AS $$ +DECLARE + conflict UUID := NULL; +BEGIN + SELECT id INTO conflict + FROM user_overrides + WHERE + id != NEW.id AND + tgt_schedule_id = NEW.tgt_schedule_id AND + ( + add_user_id in (NEW.remove_user_id, NEW.add_user_id) OR + remove_user_id in (NEW.remove_user_id, NEW.add_user_id) + ) AND + (start_time, end_time) OVERLAPS (NEW.start_time, NEW.end_time) + LIMIT 1; + + IF conflict NOTNULL THEN + RAISE 'override conflict' USING ERRCODE='check_violation', CONSTRAINT='user_override_no_conflict_allowed', HINT='CONFLICTING_ID='||conflict::text; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE CONSTRAINT TRIGGER trg_enforce_user_overide_no_conflict + AFTER INSERT OR UPDATE ON user_overrides + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_user_overide_no_conflict(); + +CREATE INDEX idx_user_overrides_schedule ON user_overrides (tgt_schedule_id, end_time); + +-- +migrate StatementBegin +CREATE FUNCTION fn_enforce_user_override_schedule_limit() RETURNS trigger AS $$ +DECLARE + max_count INT := -1; + val_count INT := 0; +BEGIN + SELECT INTO max_count max + FROM config_limits + WHERE id = 'user_overrides_per_schedule'; + + IF max_count = -1 THEN + RETURN NEW; + END IF; + + SELECT INTO val_count COUNT(*) + FROM user_overrides + WHERE + tgt_schedule_id = NEW.tgt_schedule_id AND + end_time > now(); + + IF val_count > max_count THEN + RAISE 'limit exceeded' USING ERRCODE='check_violation', CONSTRAINT='user_overrides_per_schedule_limit', HINT='max='||max_count; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +migrate StatementEnd + + +CREATE CONSTRAINT TRIGGER trg_enforce_user_override_schedule_limit + AFTER INSERT ON user_overrides + FOR EACH ROW EXECUTE PROCEDURE fn_enforce_user_override_schedule_limit(); + + +-- +migrate Down + +DROP TABLE user_overrides; + +DROP FUNCTION 
fn_enforce_user_overide_no_conflict(); +DROP FUNCTION fn_enforce_user_override_schedule_limit(); diff --git a/migrate/migrations/20180313152132-schedule-on-call-users.sql b/migrate/migrations/20180313152132-schedule-on-call-users.sql new file mode 100644 index 0000000000..337c54af2b --- /dev/null +++ b/migrate/migrations/20180313152132-schedule-on-call-users.sql @@ -0,0 +1,17 @@ + +-- +migrate Up + +CREATE TABLE schedule_on_call_users ( + schedule_id UUID NOT NULL REFERENCES schedules (id) ON DELETE CASCADE, + start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + end_time TIMESTAMP WITH TIME ZONE, + user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE, + + CHECK(end_time ISNULL OR end_time > start_time), + + UNIQUE(schedule_id, user_id, end_time) +); + +-- +migrate Down + +DROP TABLE schedule_on_call_users; diff --git a/migrate/migrations/20180315113303-strict-rotation-state.sql b/migrate/migrations/20180315113303-strict-rotation-state.sql new file mode 100644 index 0000000000..18bc61b3d2 --- /dev/null +++ b/migrate/migrations/20180315113303-strict-rotation-state.sql @@ -0,0 +1,225 @@ + +-- +migrate Up + +ALTER TABLE rotation_state + DROP CONSTRAINT rotation_state_rotation_participant_id_fkey, + ADD CONSTRAINT rotation_state_rotation_participant_id_fkey + FOREIGN KEY (rotation_participant_id) + REFERENCES rotation_participants (id) + ON DELETE RESTRICT, + ALTER rotation_participant_id SET NOT NULL; + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_set_rot_state_pos_on_active_change() RETURNS TRIGGER AS +$$ +BEGIN + SELECT position INTO NEW.position + FROM rotation_participants + WHERE id = NEW.rotation_participant_id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_set_rot_state_pos_on_part_reorder() RETURNS TRIGGER AS +$$ +BEGIN + UPDATE rotation_state + SET position = NEW.position + WHERE rotation_participant_id = NEW.id; + + RETURN NEW; +END; +$$ 
LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +ALTER TABLE rotations + ADD COLUMN participant_count INT NOT NULL DEFAULT 0; + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_incr_part_count_on_add() RETURNS TRIGGER AS +$$ +BEGIN + UPDATE rotations + SET participant_count = participant_count + 1 + WHERE id = NEW.rotation_id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_decr_part_count_on_del() RETURNS TRIGGER AS +$$ +BEGIN + UPDATE rotations + SET participant_count = participant_count - 1 + WHERE id = OLD.rotation_id; + + RETURN OLD; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_start_rotation_on_first_part_add() RETURNS TRIGGER AS +$$ +DECLARE + first_part UUID; +BEGIN + SELECT id + INTO first_part + FROM rotation_participants + WHERE rotation_id = NEW.rotation_id AND position = 0; + + INSERT INTO rotation_state ( + rotation_id, rotation_participant_id, shift_start + ) VALUES ( + NEW.rotation_id, first_part, now() + ) ON CONFLICT DO NOTHING; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_advance_or_end_rot_on_part_del() RETURNS TRIGGER AS +$$ +DECLARE + new_part UUID; + active_part UUID; +BEGIN + + SELECT rotation_participant_id + INTO active_part + FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + + IF active_part != OLD.id THEN + RETURN OLD; + END IF; + + SELECT id + INTO new_part + FROM rotation_participants + WHERE + rotation_id = OLD.rotation_id AND + id != OLD.id AND + position IN (0, OLD.position) + ORDER BY position DESC + LIMIT 1; + + IF new_part ISNULL THEN + DELETE FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + ELSE + UPDATE rotation_state + SET rotation_participant_id = new_part + WHERE rotation_id = OLD.rotation_id; + END IF; + + RETURN OLD; +END; +$$ LANGUAGE 'plpgsql'; +-- 
+migrate StatementEnd + + + +LOCK rotation_participants; +WITH part_count AS ( + SELECT rotation_id, count(*) + FROM rotation_participants + GROUP BY rotation_id +) +UPDATE rotations +SET participant_count = part_count.count +FROM part_count +WHERE part_count.rotation_id = rotations.id; + +INSERT INTO rotation_state (rotation_id, rotation_participant_id, shift_start) +SELECT rotation_id, id, now() +FROM rotation_participants +WHERE position = 0 +ON CONFLICT (rotation_id) DO NOTHING; + +CREATE TRIGGER trg_set_rot_state_pos_on_active_change +BEFORE UPDATE ON rotation_state +FOR EACH ROW +WHEN (NEW.rotation_participant_id != OLD.rotation_participant_id) +EXECUTE PROCEDURE fn_set_rot_state_pos_on_active_change(); + +CREATE TRIGGER trg_set_rot_state_pos_on_part_reorder +BEFORE UPDATE ON rotation_participants +FOR EACH ROW +WHEN (NEW.position != OLD.position) +EXECUTE PROCEDURE fn_set_rot_state_pos_on_part_reorder(); + +CREATE TRIGGER trg_incr_part_count_on_add +BEFORE INSERT ON rotation_participants +FOR EACH ROW +EXECUTE PROCEDURE fn_incr_part_count_on_add(); + + +CREATE TRIGGER trg_start_rotation_on_first_part_add +AFTER INSERT ON rotation_participants +FOR EACH ROW +EXECUTE PROCEDURE fn_start_rotation_on_first_part_add(); + + +CREATE TRIGGER trg_10_decr_part_count_on_del +BEFORE DELETE ON rotation_participants +FOR EACH ROW +EXECUTE PROCEDURE fn_decr_part_count_on_del(); + + +DROP TRIGGER trg_decr_rot_part_position_on_delete ON rotation_participants; + +CREATE TRIGGER trg_20_decr_rot_part_position_on_delete +BEFORE DELETE ON rotation_participants +FOR EACH ROW +EXECUTE PROCEDURE fn_decr_rot_part_position_on_delete(); + +CREATE TRIGGER trg_30_advance_or_end_rot_on_part_del +BEFORE DELETE ON rotation_participants +FOR EACH ROW +EXECUTE PROCEDURE fn_advance_or_end_rot_on_part_del(); + +-- +migrate Down + +ALTER TABLE rotation_state + ALTER rotation_participant_id DROP NOT NULL, + DROP CONSTRAINT rotation_state_rotation_participant_id_fkey, + ADD CONSTRAINT 
rotation_state_rotation_participant_id_fkey + FOREIGN KEY (rotation_participant_id) + REFERENCES rotation_participants (id) + ON DELETE SET NULL; + +DROP TRIGGER trg_set_rot_state_pos_on_active_change ON rotation_state; +DROP TRIGGER trg_set_rot_state_pos_on_part_reorder ON rotation_participants; +DROP TRIGGER trg_incr_part_count_on_add ON rotation_participants; +DROP TRIGGER trg_start_rotation_on_first_part_add ON rotation_participants; +DROP TRIGGER trg_10_decr_part_count_on_del ON rotation_participants; + +DROP TRIGGER trg_20_decr_rot_part_position_on_delete ON rotation_participants; + +CREATE TRIGGER trg_decr_rot_part_position_on_delete +BEFORE DELETE ON rotation_participants +FOR EACH ROW +EXECUTE PROCEDURE fn_decr_rot_part_position_on_delete(); + +DROP TRIGGER trg_30_advance_or_end_rot_on_part_del ON rotation_participants; + +DROP FUNCTION fn_set_rot_state_pos_on_active_change(); +DROP FUNCTION fn_set_rot_state_pos_on_part_reorder(); +DROP FUNCTION fn_incr_part_count_on_add(); +DROP FUNCTION fn_decr_part_count_on_del(); +DROP FUNCTION fn_start_rotation_on_first_part_add(); +DROP FUNCTION fn_advance_or_end_rot_on_part_del(); + +ALTER TABLE rotations + DROP COLUMN participant_count; diff --git a/migrate/migrations/20180320153326-npcycle-indexes.sql b/migrate/migrations/20180320153326-npcycle-indexes.sql new file mode 100644 index 0000000000..b5cb823c3b --- /dev/null +++ b/migrate/migrations/20180320153326-npcycle-indexes.sql @@ -0,0 +1,14 @@ + +-- +migrate Up + +CREATE INDEX idx_notif_rule_creation_time on user_notification_rules (user_id, created_at); +CREATE INDEX idx_outgoing_messages_notif_cycle on outgoing_messages (cycle_id); +ALTER TABLE notification_policy_cycles SET (fillfactor = 65); +ALTER TABLE outgoing_messages SET (fillfactor = 85); + +-- +migrate Down + +DROP INDEX idx_notif_rule_creation_time; +DROP INDEX idx_outgoing_messages_notif_cycle; +ALTER TABLE notification_policy_cycles RESET (fillfactor); +ALTER TABLE outgoing_messages RESET 
(fillfactor); diff --git a/migrate/migrations/20180321143255-ep-step-count.sql b/migrate/migrations/20180321143255-ep-step-count.sql new file mode 100644 index 0000000000..4133c83e18 --- /dev/null +++ b/migrate/migrations/20180321143255-ep-step-count.sql @@ -0,0 +1,68 @@ + +-- +migrate Up + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_incr_ep_step_count_on_add() RETURNS TRIGGER AS +$$ +BEGIN + UPDATE escalation_policies + SET step_count = step_count + 1 + WHERE id = NEW.escalation_policy_id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_decr_ep_step_count_on_del() RETURNS TRIGGER AS +$$ +BEGIN + UPDATE escalation_policies + SET step_count = step_count - 1 + WHERE id = OLD.escalation_policy_id; + + RETURN OLD; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +ALTER TABLE escalation_policies + ADD COLUMN step_count INT NOT NULL DEFAULT 0; + +LOCK escalation_policy_steps IN EXCLUSIVE MODE; + +WITH counts AS ( + SELECT escalation_policy_id, count(*) + FROM escalation_policy_steps + GROUP BY escalation_policy_id +) +UPDATE escalation_policies +SET step_count = counts.count +FROM counts +WHERE counts.escalation_policy_id = escalation_policies.id; + + +CREATE TRIGGER trg_10_incr_ep_step_count_on_add +BEFORE INSERT ON escalation_policy_steps +FOR EACH ROW +EXECUTE PROCEDURE fn_incr_ep_step_count_on_add(); + +CREATE TRIGGER trg_10_decr_ep_step_count_on_del +BEFORE DELETE ON escalation_policy_steps +FOR EACH ROW +EXECUTE PROCEDURE fn_decr_ep_step_count_on_del(); + + +-- +migrate Down + +DROP TRIGGER trg_10_incr_ep_step_count_on_add on escalation_policy_steps; +DROP TRIGGER trg_10_decr_ep_step_count_on_del on escalation_policy_steps; + +DROP FUNCTION fn_decr_ep_step_count_on_del(); +DROP FUNCTION fn_incr_ep_step_count_on_add(); + +ALTER TABLE escalation_policies + DROP COLUMN step_count; + diff --git a/migrate/migrations/20180321145054-strict-ep-state.sql 
b/migrate/migrations/20180321145054-strict-ep-state.sql new file mode 100644 index 0000000000..15b41672d1 --- /dev/null +++ b/migrate/migrations/20180321145054-strict-ep-state.sql @@ -0,0 +1,103 @@ + +-- +migrate Up + +-- Don't allow alert state change or creation, service EP change, or state changes +-- while changing everything. +LOCK services, escalation_policy_state IN EXCLUSIVE MODE; + + +-- Make alert_id the primary key, require +ALTER TABLE escalation_policy_state + ADD PRIMARY KEY (alert_id), + ADD COLUMN service_id UUID REFERENCES services (id) ON DELETE CASCADE, + DROP CONSTRAINT escalation_policy_state_alert_id_escalation_policy_id_key; + + +CREATE INDEX idx_escalation_policy_state_policy_ids ON escalation_policy_state (escalation_policy_id, service_id); + +-- Set service_id col, then enforce NOT NULL. +UPDATE escalation_policy_state +SET service_id = a.service_id +FROM alerts a +WHERE a.id = alert_id; + +ALTER TABLE escalation_policy_state + ALTER service_id SET NOT NULL; + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_set_ep_state_svc_id_on_insert() RETURNS TRIGGER AS +$$ +BEGIN + SELECT service_id INTO NEW.service_id + FROM alerts + WHERE id = NEW.alert_id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_10_set_ep_state_svc_id_on_insert +BEFORE INSERT ON escalation_policy_state +FOR EACH ROW +WHEN (NEW.service_id ISNULL) +EXECUTE PROCEDURE fn_set_ep_state_svc_id_on_insert(); + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_clear_ep_state_on_svc_ep_change() RETURNS TRIGGER AS +$$ +BEGIN + DELETE FROM escalation_policy_state + WHERE service_id = NEW.id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_10_clear_ep_state_on_svc_ep_change +BEFORE UPDATE ON services +FOR EACH ROW +WHEN (OLD.escalation_policy_id != NEW.escalation_policy_id) +EXECUTE PROCEDURE fn_clear_ep_state_on_svc_ep_change(); + + +-- +migrate StatementBegin +CREATE OR 
REPLACE FUNCTION fn_clear_ep_state_on_alert_close() RETURNS TRIGGER AS +$$ +BEGIN + DELETE FROM escalation_policy_state + WHERE alert_id = NEW.id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_10_clear_ep_state_on_alert_close +AFTER UPDATE ON alerts +FOR EACH ROW +WHEN (OLD.status != NEW.status AND NEW.status = 'closed') +EXECUTE PROCEDURE fn_clear_ep_state_on_alert_close(); + +ALTER TABLE escalation_policy_state SET (fillfactor = 85); + +-- +migrate Down + +DROP INDEX idx_escalation_policy_state_policy_ids; +ALTER TABLE escalation_policy_state RESET (fillfactor); + +DROP TRIGGER trg_10_clear_ep_state_on_alert_close ON alerts; +DROP TRIGGER trg_10_clear_ep_state_on_svc_ep_change ON services; +DROP TRIGGER trg_10_set_ep_state_svc_id_on_insert ON escalation_policy_state; + +DROP FUNCTION fn_set_ep_state_svc_id_on_insert(); +DROP FUNCTION fn_clear_ep_state_on_svc_ep_change(); +DROP FUNCTION fn_clear_ep_state_on_alert_close(); + +ALTER TABLE escalation_policy_state + DROP CONSTRAINT escalation_policy_state_pkey, + DROP COLUMN service_id, + ADD UNIQUE(alert_id, escalation_policy_id); diff --git a/migrate/migrations/20180326154252-move-rotation-triggers.sql b/migrate/migrations/20180326154252-move-rotation-triggers.sql new file mode 100644 index 0000000000..c7e28fe94d --- /dev/null +++ b/migrate/migrations/20180326154252-move-rotation-triggers.sql @@ -0,0 +1,23 @@ + +-- +migrate Up + +LOCK rotation_participants; + +DROP TRIGGER trg_20_decr_rot_part_position_on_delete ON rotation_participants; +DROP TRIGGER trg_30_advance_or_end_rot_on_part_del ON rotation_participants; + + + +CREATE TRIGGER trg_20_decr_rot_part_position_on_delete AFTER DELETE ON rotation_participants FOR EACH ROW EXECUTE PROCEDURE fn_decr_rot_part_position_on_delete(); +CREATE TRIGGER trg_30_advance_or_end_rot_on_part_del AFTER DELETE ON rotation_participants FOR EACH ROW EXECUTE PROCEDURE fn_advance_or_end_rot_on_part_del(); + + +-- +migrate Down + +LOCK 
rotation_participants; + +DROP TRIGGER trg_20_decr_rot_part_position_on_delete ON rotation_participants; +DROP TRIGGER trg_30_advance_or_end_rot_on_part_del ON rotation_participants; + +CREATE TRIGGER trg_20_decr_rot_part_position_on_delete BEFORE DELETE ON rotation_participants FOR EACH ROW EXECUTE PROCEDURE fn_decr_rot_part_position_on_delete(); +CREATE TRIGGER trg_30_advance_or_end_rot_on_part_del BEFORE DELETE ON rotation_participants FOR EACH ROW EXECUTE PROCEDURE fn_advance_or_end_rot_on_part_del(); diff --git a/migrate/migrations/20180330110116-move-ep-triggers.sql b/migrate/migrations/20180330110116-move-ep-triggers.sql new file mode 100644 index 0000000000..08f4fd345d --- /dev/null +++ b/migrate/migrations/20180330110116-move-ep-triggers.sql @@ -0,0 +1,17 @@ + +-- +migrate Up + +LOCK escalation_policy_steps; +DROP TRIGGER trg_decr_ep_step_number_on_delete ON escalation_policy_steps; +CREATE TRIGGER trg_decr_ep_step_number_on_delete +AFTER DELETE ON escalation_policy_steps +FOR EACH ROW +EXECUTE PROCEDURE fn_decr_ep_step_number_on_delete(); + +-- +migrate Down +LOCK escalation_policy_steps; +DROP TRIGGER trg_decr_ep_step_number_on_delete ON escalation_policy_steps; +CREATE TRIGGER trg_decr_ep_step_number_on_delete +BEFORE DELETE ON escalation_policy_steps +FOR EACH ROW +EXECUTE PROCEDURE fn_decr_ep_step_number_on_delete(); diff --git a/migrate/migrations/20180403113645-fix-rot-part-delete.sql b/migrate/migrations/20180403113645-fix-rot-part-delete.sql new file mode 100644 index 0000000000..100bbddf11 --- /dev/null +++ b/migrate/migrations/20180403113645-fix-rot-part-delete.sql @@ -0,0 +1,104 @@ +-- +migrate Up + +LOCK rotation_participants; + + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_advance_or_end_rot_on_part_del() RETURNS TRIGGER AS +$$ +DECLARE + new_part UUID; + active_part UUID; +BEGIN + + SELECT rotation_participant_id + INTO active_part + FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + + IF active_part != OLD.id THEN 
+ RETURN OLD; + END IF; + + SELECT id + INTO new_part + FROM rotation_participants + WHERE + rotation_id = OLD.rotation_id AND + id != OLD.id AND + position IN (0, OLD.position+1) + ORDER BY position DESC + LIMIT 1; + + IF new_part ISNULL THEN + DELETE FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + ELSE + UPDATE rotation_state + SET rotation_participant_id = new_part + WHERE rotation_id = OLD.rotation_id; + END IF; + + RETURN OLD; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +DROP TRIGGER trg_30_advance_or_end_rot_on_part_del ON rotation_participants; + +CREATE TRIGGER trg_30_advance_or_end_rot_on_part_del BEFORE DELETE ON rotation_participants FOR EACH ROW EXECUTE PROCEDURE fn_advance_or_end_rot_on_part_del(); + + +-- +migrate Down + +LOCK rotation_participants; + + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_advance_or_end_rot_on_part_del() RETURNS TRIGGER AS +$$ +DECLARE + new_part UUID; + active_part UUID; +BEGIN + + SELECT rotation_participant_id + INTO active_part + FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + + IF active_part != OLD.id THEN + RETURN OLD; + END IF; + + SELECT id + INTO new_part + FROM rotation_participants + WHERE + rotation_id = OLD.rotation_id AND + id != OLD.id AND + position IN (0, OLD.position) + ORDER BY position DESC + LIMIT 1; + + IF new_part ISNULL THEN + DELETE FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + ELSE + UPDATE rotation_state + SET rotation_participant_id = new_part + WHERE rotation_id = OLD.rotation_id; + END IF; + + RETURN OLD; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +DROP TRIGGER trg_30_advance_or_end_rot_on_part_del ON rotation_participants; + +CREATE TRIGGER trg_30_advance_or_end_rot_on_part_del AFTER DELETE ON rotation_participants FOR EACH ROW EXECUTE PROCEDURE fn_advance_or_end_rot_on_part_del(); diff --git a/migrate/migrations/20180417142940-region-processing.sql b/migrate/migrations/20180417142940-region-processing.sql 
new file mode 100644 index 0000000000..4040c93bfc --- /dev/null +++ b/migrate/migrations/20180417142940-region-processing.sql @@ -0,0 +1,9 @@ + +-- +migrate Up +CREATE TABLE region_ids ( + name TEXT PRIMARY KEY, + id SERIAL UNIQUE + +); +-- +migrate Down +DROP TABLE region_ids; \ No newline at end of file diff --git a/migrate/migrations/20180517100033-clear-cycles-on-policy-change.sql b/migrate/migrations/20180517100033-clear-cycles-on-policy-change.sql new file mode 100644 index 0000000000..82a3a2a128 --- /dev/null +++ b/migrate/migrations/20180517100033-clear-cycles-on-policy-change.sql @@ -0,0 +1,24 @@ + +-- +migrate Up + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_clear_np_cycles_on_state_delete() RETURNS TRIGGER AS +$$ +BEGIN + DELETE FROM notification_policy_cycles + WHERE alert_id = OLD.alert_id; + + RETURN OLD; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_clear_np_cycles_on_state_delete +AFTER DELETE ON escalation_policy_state +FOR EACH ROW +EXECUTE PROCEDURE fn_clear_np_cycles_on_state_delete(); + +-- +migrate Down +DROP TRIGGER trg_clear_np_cycles_on_state_delete ON escalation_policy_state; +DROP FUNCTION fn_clear_np_cycles_on_state_delete(); + diff --git a/migrate/migrations/20180517135700-policy-reassignment-trigger-fix.sql b/migrate/migrations/20180517135700-policy-reassignment-trigger-fix.sql new file mode 100644 index 0000000000..041443efe6 --- /dev/null +++ b/migrate/migrations/20180517135700-policy-reassignment-trigger-fix.sql @@ -0,0 +1,55 @@ + +-- +migrate Up + +LOCK escalation_policy_state; + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_reset_ep_state_on_ep_change() RETURNS TRIGGER AS +$$ +BEGIN + + NEW.escalation_policy_step_number = 0; + NEW.loop_count = 0; + NEW.force_escalation = FALSE; + NEW.last_escalation = now(); + + SELECT id INTO NEW.escalation_policy_step_id + FROM escalation_policy_steps + WHERE + step_number = 0 and + escalation_policy_id = NEW.escalation_policy_id; + + 
DELETE FROM notification_policy_cycles + WHERE service_id = NEW.service_id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_reset_ep_state_on_ep_change +AFTER UPDATE ON escalation_policy_state +FOR EACH ROW +WHEN (NEW.escalation_policy_id <> OLD.escalation_policy_id) +EXECUTE PROCEDURE fn_reset_ep_state_on_ep_change(); + +ALTER TABLE services + ADD CONSTRAINT svc_ep_uniq UNIQUE(id, escalation_policy_id); + +ALTER TABLE escalation_policy_state + ADD CONSTRAINT svc_ep_fkey + FOREIGN KEY (service_id, escalation_policy_id) + REFERENCES services (id, escalation_policy_id) + ON DELETE CASCADE + ON UPDATE CASCADE; + +-- +migrate Down + +ALTER TABLE escalation_policy_state + DROP CONSTRAINT svc_ep_fkey; +ALTER TABLE services + DROP CONSTRAINT svc_ep_uniq; + + +DROP TRIGGER trg_reset_ep_state_on_ep_change ON escalation_policy_state; +DROP FUNCTION fn_reset_ep_state_on_ep_change(); diff --git a/migrate/migrations/20180517210000-auth2.sql b/migrate/migrations/20180517210000-auth2.sql new file mode 100644 index 0000000000..b91217226a --- /dev/null +++ b/migrate/migrations/20180517210000-auth2.sql @@ -0,0 +1,86 @@ + +-- +migrate Up + +CREATE TABLE auth_user_sessions ( + id UUID PRIMARY KEY, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + user_agent TEXT NOT NULL DEFAULT '', + user_id UUID REFERENCES users (id) ON DELETE CASCADE +); + +CREATE TABLE auth_subjects ( + provider_id TEXT NOT NULL, + subject_id TEXT NOT NULL, + user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE, + PRIMARY KEY (provider_id, subject_id) +) +WITH (fillfactor = 80); + +LOCK auth_github_users, auth_basic_users; + +INSERT INTO auth_subjects (provider_id, subject_id, user_id) +SELECT 'github', github_id, user_id +FROM auth_github_users; + +INSERT INTO auth_subjects (provider_id, subject_id, user_id) +SELECT 'basic', username, user_id +FROM auth_basic_users; + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION 
fn_insert_basic_user() RETURNS TRIGGER AS +$$ +BEGIN + + INSERT INTO auth_subjects (provider_id, subject_id, user_id) + VALUES ('basic', NEW.username, NEW.user_id) + ON CONFLICT (provider_id, subject_id) DO UPDATE + SET user_id = NEW.user_id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_insert_github_user() RETURNS TRIGGER AS +$$ +BEGIN + + INSERT INTO auth_subjects (provider_id, subject_id, user_id) + VALUES ('github', NEW.github_id::text, NEW.user_id) + ON CONFLICT (provider_id, subject_id) DO UPDATE + SET user_id = NEW.user_id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_insert_github_user +AFTER INSERT ON auth_github_users +FOR EACH ROW +EXECUTE PROCEDURE fn_insert_github_user(); + +CREATE TRIGGER trg_insert_basic_user +AFTER INSERT ON auth_basic_users +FOR EACH ROW +EXECUTE PROCEDURE fn_insert_basic_user(); + +-- +migrate Down + +DROP TRIGGER trg_insert_github_user ON auth_github_users; +DROP FUNCTION fn_insert_github_user(); + +DROP TRIGGER trg_insert_basic_user ON auth_basic_users; +DROP FUNCTION fn_insert_basic_user(); + +INSERT INTO auth_github_users (github_id, user_id) +SELECT subject_id, user_id +FROM auth_subjects +WHERE provider_id = 'github' +ON CONFLICT (github_id) DO UPDATE +SET user_id = excluded.user_id; + +DROP TABLE auth_subjects; +DROP TABLE auth_user_sessions; diff --git a/migrate/migrations/20180517220000-keyring.sql b/migrate/migrations/20180517220000-keyring.sql new file mode 100644 index 0000000000..57009548f7 --- /dev/null +++ b/migrate/migrations/20180517220000-keyring.sql @@ -0,0 +1,15 @@ + +-- +migrate Up + +CREATE TABLE keyring ( + id TEXT PRIMARY KEY, + verification_keys BYTEA NOT NULL, + signing_key BYTEA NOT NULL, + next_key BYTEA NOT NULL, + next_rotation TIMESTAMP WITH TIME ZONE, + rotation_count BIGINT NOT NULL +); + +-- +migrate Down + +DROP TABLE keyring; diff --git 
a/migrate/migrations/20180517230000-auth-nonce.sql b/migrate/migrations/20180517230000-auth-nonce.sql new file mode 100644 index 0000000000..753037c67b --- /dev/null +++ b/migrate/migrations/20180517230000-auth-nonce.sql @@ -0,0 +1,11 @@ + +-- +migrate Up + +CREATE TABLE auth_nonce ( + id UUID PRIMARY KEY, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now() +); + +-- +migrate Down + +DROP TABLE auth_nonce; diff --git a/migrate/migrations/20180521124533-UserFavorites.sql b/migrate/migrations/20180521124533-UserFavorites.sql new file mode 100644 index 0000000000..3935084c66 --- /dev/null +++ b/migrate/migrations/20180521124533-UserFavorites.sql @@ -0,0 +1,9 @@ +-- +migrate Up +CREATE TABLE user_favorites ( + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + tgt_service_id UUID NOT NULL REFERENCES services(id) ON DELETE CASCADE, + UNIQUE (user_id, tgt_service_id) +); +-- +migrate Down +DROP TABLE user_favorites; + diff --git a/migrate/migrations/20180710110438-engine-processing-versions.sql b/migrate/migrations/20180710110438-engine-processing-versions.sql new file mode 100644 index 0000000000..9a5adb7205 --- /dev/null +++ b/migrate/migrations/20180710110438-engine-processing-versions.sql @@ -0,0 +1,34 @@ + +-- +migrate Up + +CREATE TYPE engine_processing_type AS ENUM ( + 'escalation', + 'heartbeat', + 'np_cycle', + 'rotation', + 'schedule', + 'status_update', + 'verify', + 'message' +); + +CREATE TABLE engine_processing_versions ( + type_id engine_processing_type PRIMARY KEY, + version INT NOT NULL DEFAULT 1 +); + +INSERT INTO engine_processing_versions (type_id) +VALUES + ('escalation'), + ('heartbeat'), + ('np_cycle'), + ('rotation'), + ('schedule'), + ('status_update'), + ('verify'), + ('message'); + +-- +migrate Down + +DROP TABLE engine_processing_versions; +DROP TYPE engine_processing_type; diff --git a/migrate/migrations/20180720121433-increment-module-versions.sql b/migrate/migrations/20180720121433-increment-module-versions.sql new 
file mode 100644 index 0000000000..6b6da21b33 --- /dev/null +++ b/migrate/migrations/20180720121433-increment-module-versions.sql @@ -0,0 +1,20 @@ + +-- +migrate Up + +UPDATE engine_processing_versions +SET "version" = 2 +WHERE type_id = 'escalation'; + +UPDATE engine_processing_versions +SET "version" = 2 +WHERE type_id = 'np_cycle'; + +-- +migrate Down + +UPDATE engine_processing_versions +SET "version" = 1 +WHERE type_id = 'escalation'; + +UPDATE engine_processing_versions +SET "version" = 1 +WHERE type_id = 'np_cycle'; diff --git a/migrate/migrations/20180720121533-drop-dedup-trigger.sql b/migrate/migrations/20180720121533-drop-dedup-trigger.sql new file mode 100644 index 0000000000..6796189182 --- /dev/null +++ b/migrate/migrations/20180720121533-drop-dedup-trigger.sql @@ -0,0 +1,28 @@ + +-- +migrate Up + +DROP TRIGGER trg_ensure_alert_dedup_key ON public.alerts; +DROP FUNCTION fn_ensure_alert_dedup_key(); + +-- +migrate Down + +-- +migrate StatementBegin +CREATE FUNCTION fn_ensure_alert_dedup_key() RETURNS TRIGGER AS +$$ +BEGIN + IF NEW.dedup_key ISNULL THEN + NEW.dedup_key = + concat( + 'auto:1:', + encode(digest(concat(NEW."description"), 'sha512'), 'hex') + ); + END IF; + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_ensure_alert_dedup_key +BEFORE INSERT ON public.alerts +FOR EACH ROW WHEN ((new.status <> 'closed'::enum_alert_status)) +EXECUTE PROCEDURE fn_ensure_alert_dedup_key(); diff --git a/migrate/migrations/20180720121633-drop-description-col.sql b/migrate/migrations/20180720121633-drop-description-col.sql new file mode 100644 index 0000000000..e2dfe16a9b --- /dev/null +++ b/migrate/migrations/20180720121633-drop-description-col.sql @@ -0,0 +1,46 @@ + +-- +migrate Up +DROP TRIGGER trg_00_ensure_description ON public.alerts; +DROP FUNCTION fn_alerts_ensure_description(); + +ALTER TABLE alerts + DROP COLUMN "description"; + +-- +migrate Down + +ALTER TABLE alerts + ADD COLUMN "description" text; + +UPDATE 
alerts +SET "description" = TRIM(TRAILING chr(10) FROM summary || chr(10) || details); + +ALTER TABLE alerts + ALTER COLUMN "description" SET NOT NULL; + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_alerts_ensure_description() RETURNS TRIGGER AS +$$ +BEGIN + IF NEW.description ISNULL THEN + NEW.description = TRIM(TRAILING chr(10) FROM NEW.summary || chr(10) || NEW.details); + END IF; + + IF NEW.summary ISNULL THEN + NEW.summary = split_part(NEW.description, chr(10), 1); + NEW.details = ( + case when strpos(NEW.description, chr(10)) > 0 then + substr(NEW.description, strpos(NEW.description, chr(10))+1) + else + '' + end + ); + END IF; + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_00_ensure_description +BEFORE INSERT ON public.alerts +FOR EACH ROW +EXECUTE PROCEDURE fn_alerts_ensure_description(); diff --git a/migrate/migrations/20180720121733-fix-svc-ep-state-trigger.sql b/migrate/migrations/20180720121733-fix-svc-ep-state-trigger.sql new file mode 100644 index 0000000000..c1c9f709ff --- /dev/null +++ b/migrate/migrations/20180720121733-fix-svc-ep-state-trigger.sql @@ -0,0 +1,88 @@ + +-- +migrate Up +LOCK services, alerts, escalation_policy_state; + +DROP TRIGGER trg_reset_ep_state_on_ep_change ON escalation_policy_state; +DROP FUNCTION fn_reset_ep_state_on_ep_change(); + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_clear_ep_state_on_svc_ep_change() RETURNS TRIGGER AS +$$ +BEGIN + UPDATE escalation_policy_state + SET + escalation_policy_id = NEW.escalation_policy_id, + escalation_policy_step_id = NULL, + loop_count = 0, + last_escalation = NULL, + next_escalation = NULL, + force_escalation = false, + escalation_policy_step_number = 0 + WHERE service_id = NEW.id + ; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +DROP TRIGGER trg_10_clear_ep_state_on_svc_ep_change ON services; +CREATE TRIGGER trg_10_clear_ep_state_on_svc_ep_change +AFTER UPDATE ON public.services +FOR 
EACH ROW +WHEN ((old.escalation_policy_id <> new.escalation_policy_id)) +EXECUTE PROCEDURE fn_clear_ep_state_on_svc_ep_change(); + +-- +migrate Down + +LOCK services, alerts, escalation_policy_state; + +DROP TRIGGER trg_10_clear_ep_state_on_svc_ep_change ON services; +CREATE TRIGGER trg_10_clear_ep_state_on_svc_ep_change +BEFORE UPDATE ON public.services +FOR EACH ROW +WHEN ((old.escalation_policy_id <> new.escalation_policy_id)) +EXECUTE PROCEDURE fn_clear_ep_state_on_svc_ep_change(); + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_clear_ep_state_on_svc_ep_change() RETURNS TRIGGER AS +$$ +BEGIN + DELETE FROM escalation_policy_state + WHERE service_id = NEW.id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_reset_ep_state_on_ep_change() RETURNS TRIGGER AS +$$ +BEGIN + + NEW.escalation_policy_step_number = 0; + NEW.loop_count = 0; + NEW.force_escalation = FALSE; + NEW.last_escalation = now(); + + SELECT id INTO NEW.escalation_policy_step_id + FROM escalation_policy_steps + WHERE + step_number = 0 and + escalation_policy_id = NEW.escalation_policy_id; + + DELETE FROM notification_policy_cycles + WHERE service_id = NEW.service_id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_reset_ep_state_on_ep_change +AFTER UPDATE ON escalation_policy_state +FOR EACH ROW +WHEN (NEW.escalation_policy_id <> OLD.escalation_policy_id) +EXECUTE PROCEDURE fn_reset_ep_state_on_ep_change(); diff --git a/migrate/migrations/20180720121833-create-ep-state-on-alert.sql b/migrate/migrations/20180720121833-create-ep-state-on-alert.sql new file mode 100644 index 0000000000..f192db041d --- /dev/null +++ b/migrate/migrations/20180720121833-create-ep-state-on-alert.sql @@ -0,0 +1,80 @@ + +-- +migrate Up + +LOCK alerts, escalation_policy_state; + +ALTER TABLE escalation_policy_state + ALTER COLUMN last_escalation DROP NOT NULL, + ALTER COLUMN 
last_escalation DROP DEFAULT; + +INSERT INTO escalation_policy_state (alert_id, service_id, escalation_policy_id) +SELECT a.id, a.service_id, svc.escalation_policy_id +FROM alerts a +JOIN services svc ON svc.id = a.service_id +JOIN escalation_policies ep ON ep.id = svc.escalation_policy_id AND ep.step_count > 0 +WHERE a.status != 'closed' +ON CONFLICT (alert_id) DO NOTHING; + + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_insert_ep_state_on_step_insert() RETURNS TRIGGER AS +$$ +BEGIN + + INSERT INTO escalation_policy_state (alert_id, service_id, escalation_policy_id) + SELECT a.id, a.service_id, NEW.escalation_policy_id + FROM alerts a + JOIN services svc ON + svc.id = a.service_id AND + svc.escalation_policy_id = NEW.escalation_policy_id + WHERE a.status != 'closed'; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_insert_ep_state_on_alert_insert() RETURNS TRIGGER AS +$$ +BEGIN + + INSERT INTO escalation_policy_state (alert_id, service_id, escalation_policy_id) + SELECT NEW.id, NEW.service_id, svc.escalation_policy_id + FROM services svc + JOIN escalation_policies ep ON ep.id = svc.escalation_policy_id AND ep.step_count > 0 + WHERE svc.id = NEW.service_id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_10_insert_ep_state_on_alert_insert +AFTER INSERT ON public.alerts +FOR EACH ROW +WHEN (new.status != 'closed'::enum_alert_status) +EXECUTE PROCEDURE fn_insert_ep_state_on_alert_insert(); + +CREATE TRIGGER trg_10_insert_ep_state_on_step_insert +AFTER INSERT ON public.escalation_policy_steps +FOR EACH ROW +WHEN (NEW.step_number = 0) +EXECUTE PROCEDURE fn_insert_ep_state_on_step_insert(); + + +-- +migrate Down +LOCK alerts, escalation_policy_state; + +DELETE FROM escalation_policy_state +WHERE last_escalation isnull; + +ALTER TABLE escalation_policy_state + ALTER COLUMN last_escalation SET NOT NULL, + ALTER COLUMN 
last_escalation SET DEFAULT now(); + +DROP TRIGGER trg_10_insert_ep_state_on_alert_insert ON public.alerts; +DROP FUNCTION fn_insert_ep_state_on_alert_insert(); + +DROP TRIGGER trg_10_insert_ep_state_on_step_insert ON public.escalation_policy_steps; +DROP FUNCTION fn_insert_ep_state_on_step_insert(); diff --git a/migrate/migrations/20180720121933-store-next-escalation-time.sql b/migrate/migrations/20180720121933-store-next-escalation-time.sql new file mode 100644 index 0000000000..881a9b7b45 --- /dev/null +++ b/migrate/migrations/20180720121933-store-next-escalation-time.sql @@ -0,0 +1,12 @@ + +-- +migrate Up +ALTER TABLE escalation_policy_state + ADD COLUMN next_escalation TIMESTAMP WITH TIME ZONE; + +CREATE INDEX ON escalation_policy_state (next_escalation, force_escalation); + +-- +migrate Down + +ALTER TABLE escalation_policy_state + DROP COLUMN next_escalation; + diff --git a/migrate/migrations/20180720122033-ep-step-on-call.sql b/migrate/migrations/20180720122033-ep-step-on-call.sql new file mode 100644 index 0000000000..97c032453f --- /dev/null +++ b/migrate/migrations/20180720122033-ep-step-on-call.sql @@ -0,0 +1,17 @@ + +-- +migrate Up + +CREATE TABLE ep_step_on_call_users ( + user_id UUID NOT NULL REFERENCES users (id) ON DELETE CASCADE, + ep_step_id UUID NOT NULL REFERENCES escalation_policy_steps (id) ON DELETE CASCADE, + start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + end_time TIMESTAMP WITH TIME ZONE +); + +CREATE UNIQUE INDEX idx_ep_step_on_call +ON ep_step_on_call_users (user_id, ep_step_id) +WHERE end_time IS NULL; + +-- +migrate Down + +DROP TABLE ep_step_on_call_users; diff --git a/migrate/migrations/20180720122133-clear-next-esc-on-ack.sql b/migrate/migrations/20180720122133-clear-next-esc-on-ack.sql new file mode 100644 index 0000000000..7373158e65 --- /dev/null +++ b/migrate/migrations/20180720122133-clear-next-esc-on-ack.sql @@ -0,0 +1,27 @@ + +-- +migrate Up + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION 
fn_clear_next_esc_on_alert_ack() RETURNS TRIGGER AS +$$ +BEGIN + + UPDATE escalation_policy_state + SET next_escalation = null + WHERE alert_id = NEW.id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_20_clear_next_esc_on_alert_ack +AFTER UPDATE ON public.alerts +FOR EACH ROW +WHEN (new.status != old.status and old.status = 'active'::enum_alert_status) +EXECUTE PROCEDURE fn_clear_next_esc_on_alert_ack(); + +-- +migrate Down + +DROP TRIGGER trg_20_clear_next_esc_on_alert_ack ON alerts; +DROP FUNCTION fn_clear_next_esc_on_alert_ack(); diff --git a/migrate/migrations/20180720122233-drop-unique-cycles-constraint.sql b/migrate/migrations/20180720122233-drop-unique-cycles-constraint.sql new file mode 100644 index 0000000000..0dc80cf479 --- /dev/null +++ b/migrate/migrations/20180720122233-drop-unique-cycles-constraint.sql @@ -0,0 +1,9 @@ + +-- +migrate Up +ALTER TABLE notification_policy_cycles + DROP CONSTRAINT notification_policy_cycles_user_id_alert_id_key; + +-- +migrate Down + +ALTER TABLE notification_policy_cycles + ADD CONSTRAINT notification_policy_cycles_user_id_alert_id_key UNIQUE (user_id, alert_id); diff --git a/migrate/migrations/20180720122333-fix-schedule-index.sql b/migrate/migrations/20180720122333-fix-schedule-index.sql new file mode 100644 index 0000000000..4deb6eb09b --- /dev/null +++ b/migrate/migrations/20180720122333-fix-schedule-index.sql @@ -0,0 +1,13 @@ + +-- +migrate Up +ALTER TABLE schedule_on_call_users + DROP CONSTRAINT schedule_on_call_users_schedule_id_user_id_end_time_key; + +CREATE UNIQUE INDEX idx_schedule_on_call_once +ON schedule_on_call_users (schedule_id, user_id) +WHERE end_time ISNULL; + +-- +migrate Down +DROP INDEX idx_schedule_on_call_once; +ALTER TABLE schedule_on_call_users + ADD CONSTRAINT schedule_on_call_users_schedule_id_user_id_end_time_key UNIQUE(schedule_id, user_id, end_time); diff --git a/migrate/migrations/20180720122433-trig-alert-on-force-escalation.sql 
b/migrate/migrations/20180720122433-trig-alert-on-force-escalation.sql new file mode 100644 index 0000000000..f64ecaa0b0 --- /dev/null +++ b/migrate/migrations/20180720122433-trig-alert-on-force-escalation.sql @@ -0,0 +1,52 @@ + +-- +migrate Up + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_lock_svc_on_force_escalation() RETURNS TRIGGER AS +$$ +BEGIN + + -- lock service first + PERFORM 1 + FROM services svc + WHERE svc.id = NEW.service_id + FOR UPDATE; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_trig_alert_on_force_escalation() RETURNS TRIGGER AS +$$ +BEGIN + + UPDATE alerts + SET "status" = 'triggered' + WHERE id = NEW.alert_id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +CREATE TRIGGER trg_20_lock_svc_on_force_escalation +BEFORE UPDATE ON public.escalation_policy_state +FOR EACH ROW +WHEN (new.force_escalation != old.force_escalation and new.force_escalation) +EXECUTE PROCEDURE fn_lock_svc_on_force_escalation(); + +CREATE TRIGGER trg_30_trig_alert_on_force_escalation +AFTER UPDATE ON public.escalation_policy_state +FOR EACH ROW +WHEN (new.force_escalation != old.force_escalation and new.force_escalation) +EXECUTE PROCEDURE fn_trig_alert_on_force_escalation(); + +-- +migrate Down + +DROP TRIGGER trg_20_lock_svc_on_force_escalation on escalation_policy_state; +DROP TRIGGER trg_30_trig_alert_on_force_escalation on escalation_policy_state; +DROP FUNCTION fn_trig_alert_on_force_escalation(); +DROP FUNCTION fn_lock_svc_on_force_escalation(); \ No newline at end of file diff --git a/migrate/migrations/20180720122533-drop-ep-state-np-trig.sql b/migrate/migrations/20180720122533-drop-ep-state-np-trig.sql new file mode 100644 index 0000000000..62fb317af8 --- /dev/null +++ b/migrate/migrations/20180720122533-drop-ep-state-np-trig.sql @@ -0,0 +1,23 @@ + +-- +migrate Up +DROP TRIGGER trg_clear_np_cycles_on_state_delete ON 
escalation_policy_state; +DROP FUNCTION fn_clear_np_cycles_on_state_delete(); + +-- +migrate Down + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_clear_np_cycles_on_state_delete() RETURNS TRIGGER AS +$$ +BEGIN + DELETE FROM notification_policy_cycles + WHERE alert_id = OLD.alert_id; + + RETURN OLD; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_clear_np_cycles_on_state_delete +AFTER DELETE ON public.escalation_policy_state +FOR EACH ROW +EXECUTE PROCEDURE fn_clear_np_cycles_on_state_delete(); diff --git a/migrate/migrations/20180720122633-update-existing-escalations.sql b/migrate/migrations/20180720122633-update-existing-escalations.sql new file mode 100644 index 0000000000..8be8c11426 --- /dev/null +++ b/migrate/migrations/20180720122633-update-existing-escalations.sql @@ -0,0 +1,42 @@ + +-- +migrate Up + +LOCK escalation_policy_state, escalation_policy_steps, notification_policy_cycles; + +UPDATE escalation_policy_state state +SET next_escalation = last_escalation + cast(cast(step.delay as text)||' minutes' as interval) +FROM escalation_policy_steps step +WHERE next_escalation isnull and step.id = state.escalation_policy_step_id; + + +-- pre-populate on call +with on_call as ( + select distinct + step.id step_id, + coalesce(act.user_id, part.user_id, sched.user_id) user_id + from escalation_policy_steps step + join escalation_policy_actions act on act.escalation_policy_step_id = step.id + left join rotation_state rState on rState.rotation_id = act.rotation_id + left join rotation_participants part on part.id = rState.rotation_participant_id + left join schedule_on_call_users sched on sched.schedule_id = act.schedule_id and sched.end_time isnull + where coalesce(act.user_id, part.user_id, sched.user_id) notnull +), new_on_call as ( + insert into ep_step_on_call_users (ep_step_id, user_id) + select step_id, user_id + from on_call + on conflict do nothing + returning ep_step_id, user_id +) +insert into 
notification_policy_cycles (alert_id, user_id) +select state.alert_id, oncall.user_id +from escalation_policy_state state +join alerts a on a.id = state.alert_id and a.status = 'triggered' +join new_on_call oncall on oncall.ep_step_id = state.escalation_policy_step_id +except +select alert_id, user_id +from notification_policy_cycles; + +-- +migrate Down + +UPDATE escalation_policy_state +SET next_escalation = null; diff --git a/migrate/migrations/20180728150427-add-provider-msg-id.sql b/migrate/migrations/20180728150427-add-provider-msg-id.sql new file mode 100644 index 0000000000..6ea8d62465 --- /dev/null +++ b/migrate/migrations/20180728150427-add-provider-msg-id.sql @@ -0,0 +1,23 @@ + +-- +migrate Up + +UPDATE engine_processing_versions +SET "version" = 2 +WHERE type_id = 'message'; + +ALTER TABLE outgoing_messages + ADD COLUMN provider_msg_id TEXT, + ADD COLUMN provider_seq INT NOT NULL DEFAULT 0; + +CREATE UNIQUE INDEX idx_outgoing_messages_provider_msg_id ON outgoing_messages (provider_msg_id); + +-- +migrate Down + +-- Lower version first when migrating down, to stop processing +UPDATE engine_processing_versions +SET "version" = 1 +WHERE type_id = 'message'; + +ALTER TABLE outgoing_messages + DROP COLUMN provider_msg_id, + DROP COLUMN provider_seq; diff --git a/migrate/migrations/20180803090205-drop-alert-assignments.sql b/migrate/migrations/20180803090205-drop-alert-assignments.sql new file mode 100644 index 0000000000..4661b17a81 --- /dev/null +++ b/migrate/migrations/20180803090205-drop-alert-assignments.sql @@ -0,0 +1,17 @@ + +-- +migrate Up +DROP TABLE alert_assignments; +-- +migrate Down +CREATE TABLE alert_assignments ( + user_id uuid NOT NULL, + alert_id bigint NOT NULL +); + +ALTER TABLE ONLY alert_assignments + ADD CONSTRAINT alert_assignments_pkey PRIMARY KEY (user_id, alert_id); + +ALTER TABLE ONLY alert_assignments + ADD CONSTRAINT alert_assignments_alert_id_fkey FOREIGN KEY (alert_id) REFERENCES alerts(id) ON DELETE CASCADE; + +ALTER TABLE 
ONLY alert_assignments + ADD CONSTRAINT alert_assignments_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; diff --git a/migrate/migrations/20180803090305-drop-alert-escalation-policy-snapshots.sql b/migrate/migrations/20180803090305-drop-alert-escalation-policy-snapshots.sql new file mode 100644 index 0000000000..990c88d6e8 --- /dev/null +++ b/migrate/migrations/20180803090305-drop-alert-escalation-policy-snapshots.sql @@ -0,0 +1,22 @@ + +-- +migrate Up +DROP TABLE alert_escalation_policy_snapshots; +-- +migrate Down +CREATE TABLE alert_escalation_policy_snapshots ( + alert_id bigint NOT NULL, + step_number integer NOT NULL, + step_max integer NOT NULL, + step_delay interval NOT NULL, + repeat integer NOT NULL, + user_id uuid, + schedule_id uuid +); + +ALTER TABLE ONLY alert_escalation_policy_snapshots + ADD CONSTRAINT alert_escalation_policy_snapshots_alert_id_fkey FOREIGN KEY (alert_id) REFERENCES alerts(id) ON DELETE CASCADE; + +ALTER TABLE ONLY alert_escalation_policy_snapshots + ADD CONSTRAINT alert_escalation_policy_snapshots_schedule_id_fkey FOREIGN KEY (schedule_id) REFERENCES schedules(id) ON DELETE CASCADE; + +ALTER TABLE ONLY alert_escalation_policy_snapshots + ADD CONSTRAINT alert_escalation_policy_snapshots_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; \ No newline at end of file diff --git a/migrate/migrations/20180803090405-drop-notification-logs.sql b/migrate/migrations/20180803090405-drop-notification-logs.sql new file mode 100644 index 0000000000..32b3702c4a --- /dev/null +++ b/migrate/migrations/20180803090405-drop-notification-logs.sql @@ -0,0 +1,21 @@ + +-- +migrate Up +DROP TABLE notification_logs; +-- +migrate Down +CREATE TABLE notification_logs ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + alert_id bigint NOT NULL, + contact_method_id uuid NOT NULL, + process_timestamp timestamp with time zone DEFAULT now() NOT NULL, + completed boolean DEFAULT false NOT NULL +); + +ALTER TABLE ONLY 
notification_logs + ADD CONSTRAINT notification_logs_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY notification_logs + ADD CONSTRAINT notification_logs_alert_id_fkey FOREIGN KEY (alert_id) REFERENCES alerts(id) ON DELETE CASCADE; + +ALTER TABLE ONLY notification_logs + ADD CONSTRAINT notification_logs_contact_method_id_fkey FOREIGN KEY (contact_method_id) REFERENCES user_contact_methods(id) ON DELETE CASCADE; + diff --git a/migrate/migrations/20180803090505-drop-process-alerts.sql b/migrate/migrations/20180803090505-drop-process-alerts.sql new file mode 100644 index 0000000000..feb2a9af94 --- /dev/null +++ b/migrate/migrations/20180803090505-drop-process-alerts.sql @@ -0,0 +1,21 @@ + +-- +migrate Up +DROP TABLE process_alerts; + +-- +migrate Down +CREATE TABLE process_alerts ( + alert_id bigint NOT NULL, + client_id uuid, + deadline timestamp with time zone, + last_processed timestamp with time zone +); + +ALTER TABLE ONLY process_alerts + ADD CONSTRAINT process_alerts_pkey PRIMARY KEY (alert_id); + +CREATE INDEX process_alerts_oldest_first ON public.process_alerts USING btree (last_processed NULLS FIRST); + +CREATE TRIGGER trg_disable_old_alert_processing BEFORE INSERT ON public.process_alerts FOR EACH STATEMENT EXECUTE PROCEDURE fn_disable_inserts(); + +ALTER TABLE ONLY process_alerts + ADD CONSTRAINT process_alerts_alert_id_fkey FOREIGN KEY (alert_id) REFERENCES alerts(id) ON DELETE CASCADE; diff --git a/migrate/migrations/20180803090605-drop-process-rotations.sql b/migrate/migrations/20180803090605-drop-process-rotations.sql new file mode 100644 index 0000000000..f332edf03a --- /dev/null +++ b/migrate/migrations/20180803090605-drop-process-rotations.sql @@ -0,0 +1,19 @@ + +-- +migrate Up +DROP TABLE process_rotations; + +-- +migrate Down +CREATE TABLE process_rotations ( + rotation_id uuid NOT NULL, + client_id uuid, + deadline timestamp with time zone, + last_processed timestamp with time zone +); + +ALTER TABLE ONLY process_rotations + ADD CONSTRAINT 
process_rotations_pkey PRIMARY KEY (rotation_id); + +CREATE INDEX process_rotations_oldest_first ON public.process_rotations USING btree (last_processed NULLS FIRST); + +ALTER TABLE ONLY process_rotations + ADD CONSTRAINT process_rotations_rotation_id_fkey FOREIGN KEY (rotation_id) REFERENCES rotations(id) ON DELETE CASCADE; diff --git a/migrate/migrations/20180803090705-drop-process-schedules.sql b/migrate/migrations/20180803090705-drop-process-schedules.sql new file mode 100644 index 0000000000..e91442fc1e --- /dev/null +++ b/migrate/migrations/20180803090705-drop-process-schedules.sql @@ -0,0 +1,18 @@ + +-- +migrate Up +DROP TABLE process_schedules; +-- +migrate Down +CREATE TABLE process_schedules ( + schedule_id uuid NOT NULL, + client_id uuid, + deadline timestamp with time zone, + last_processed timestamp with time zone +); + +ALTER TABLE ONLY process_schedules + ADD CONSTRAINT process_schedules_pkey PRIMARY KEY (schedule_id); + +CREATE INDEX process_schedules_oldest_first ON public.process_schedules USING btree (last_processed NULLS FIRST); + +ALTER TABLE ONLY process_schedules + ADD CONSTRAINT process_schedules_schedule_id_fkey FOREIGN KEY (schedule_id) REFERENCES schedules(id) ON DELETE CASCADE; \ No newline at end of file diff --git a/migrate/migrations/20180803090805-drop-sent-notifications.sql b/migrate/migrations/20180803090805-drop-sent-notifications.sql new file mode 100644 index 0000000000..e1d3733728 --- /dev/null +++ b/migrate/migrations/20180803090805-drop-sent-notifications.sql @@ -0,0 +1,27 @@ + +-- +migrate Up +DROP TABLE sent_notifications; + +-- +migrate Down +CREATE TABLE sent_notifications ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + alert_id bigint NOT NULL, + contact_method_id uuid NOT NULL, + sent_at timestamp with time zone, + cycle_id uuid NOT NULL, + notification_rule_id uuid NOT NULL +); + +ALTER TABLE ONLY sent_notifications + ADD CONSTRAINT sent_notifications_notification_rule_id_cycle_id_key UNIQUE (notification_rule_id, 
cycle_id); + +CREATE INDEX sent_notifications_id_idx ON public.sent_notifications USING btree (id); + +ALTER TABLE ONLY sent_notifications + ADD CONSTRAINT sent_notifications_alert_id_fkey FOREIGN KEY (alert_id) REFERENCES alerts(id) ON DELETE CASCADE; + +ALTER TABLE ONLY sent_notifications + ADD CONSTRAINT sent_notifications_contact_method_id_fkey FOREIGN KEY (contact_method_id) REFERENCES user_contact_methods(id) ON DELETE CASCADE; + +ALTER TABLE ONLY sent_notifications + ADD CONSTRAINT sent_notifications_notification_rule_id_fkey FOREIGN KEY (notification_rule_id) REFERENCES user_notification_rules(id) ON DELETE CASCADE; diff --git a/migrate/migrations/20180803090905-drop-throttle.sql b/migrate/migrations/20180803090905-drop-throttle.sql new file mode 100644 index 0000000000..2862b9632b --- /dev/null +++ b/migrate/migrations/20180803090905-drop-throttle.sql @@ -0,0 +1,15 @@ + +-- +migrate Up +DROP TABLE throttle; +-- +migrate Down +CREATE TABLE throttle ( + action enum_throttle_type NOT NULL, + client_id uuid, + last_action_time timestamp with time zone DEFAULT now() NOT NULL +); + +ALTER TABLE ONLY throttle + ADD CONSTRAINT throttle_pkey PRIMARY KEY (action); + +INSERT INTO throttle (action) +VALUES ('notifications'); diff --git a/migrate/migrations/20180803091005-drop-user-contact-method-locks.sql b/migrate/migrations/20180803091005-drop-user-contact-method-locks.sql new file mode 100644 index 0000000000..15f52eba01 --- /dev/null +++ b/migrate/migrations/20180803091005-drop-user-contact-method-locks.sql @@ -0,0 +1,23 @@ + +-- +migrate Up +DROP TABLE user_contact_method_locks; +-- +migrate Down +CREATE TABLE user_contact_method_locks ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + client_id uuid NOT NULL, + alert_id bigint NOT NULL, + contact_method_id uuid NOT NULL, + "timestamp" timestamp with time zone DEFAULT now() NOT NULL +); + +ALTER TABLE ONLY user_contact_method_locks + ADD CONSTRAINT user_contact_method_locks_alert_id_contact_method_id_key UNIQUE 
(alert_id, contact_method_id); + +ALTER TABLE ONLY user_contact_method_locks + ADD CONSTRAINT user_contact_method_locks_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY user_contact_method_locks + ADD CONSTRAINT user_contact_method_locks_alert_id_fkey FOREIGN KEY (alert_id) REFERENCES alerts(id) ON DELETE CASCADE; + +ALTER TABLE ONLY user_contact_method_locks + ADD CONSTRAINT user_contact_method_locks_contact_method_id_fkey FOREIGN KEY (contact_method_id) REFERENCES user_contact_methods(id) ON DELETE CASCADE; \ No newline at end of file diff --git a/migrate/migrations/20180803110851-drop-twilio-egress-sms-status.sql b/migrate/migrations/20180803110851-drop-twilio-egress-sms-status.sql new file mode 100644 index 0000000000..2214d96d96 --- /dev/null +++ b/migrate/migrations/20180803110851-drop-twilio-egress-sms-status.sql @@ -0,0 +1,30 @@ + +-- +migrate Up + +DROP TABLE twilio_egress_sms_status; +DROP TYPE enum_twilio_sms_status; + +-- +migrate Down + + +CREATE TYPE enum_twilio_sms_status AS ENUM ( + 'unknown', -- in case twilio insists it doesn't exist when we ask + 'accepted', + 'queued', + 'sending', + 'sent', + 'receiving', + 'received', + 'delivered', + 'undelivered', + 'failed' +); + + +CREATE TABLE twilio_egress_sms_status ( + twilio_sid TEXT PRIMARY KEY, + last_status enum_twilio_sms_status NOT NULL, + sent_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + last_update TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + dest_number TEXT NOT NULL +); diff --git a/migrate/migrations/20180803110859-drop-twilio-egress-voice-status.sql b/migrate/migrations/20180803110859-drop-twilio-egress-voice-status.sql new file mode 100644 index 0000000000..d421bffa72 --- /dev/null +++ b/migrate/migrations/20180803110859-drop-twilio-egress-voice-status.sql @@ -0,0 +1,32 @@ + + +-- +migrate Up +DROP TABLE twilio_egress_voice_status; +DROP TYPE enum_twilio_voice_status; + + +-- +migrate Down + + +CREATE TYPE enum_twilio_voice_status AS ENUM ( + 'unknown', -- in case twilio insists 
it doesn't exist when we ask + 'initiated', + 'queued', + 'ringing', + 'in-progress', + 'completed', + 'busy', + 'failed', + 'no-answer', + 'canceled' +); + + +CREATE TABLE twilio_egress_voice_status ( + twilio_sid TEXT PRIMARY KEY, + last_status enum_twilio_voice_status NOT NULL, + sent_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + last_update TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + dest_number TEXT NOT NULL, + last_sequence_number INT +); diff --git a/migrate/migrations/20180806092512-incr-message-version.sql b/migrate/migrations/20180806092512-incr-message-version.sql new file mode 100644 index 0000000000..6694642bcc --- /dev/null +++ b/migrate/migrations/20180806092512-incr-message-version.sql @@ -0,0 +1,12 @@ + +-- +migrate Up + +UPDATE engine_processing_versions +SET "version" = 3 +WHERE type_id = 'message'; + +-- +migrate Down + +UPDATE engine_processing_versions +SET "version" = 2 +WHERE type_id = 'message'; diff --git a/migrate/migrations/20180806102513-drop-twilio-voice-callbacks.sql b/migrate/migrations/20180806102513-drop-twilio-voice-callbacks.sql new file mode 100644 index 0000000000..f8fa028e9c --- /dev/null +++ b/migrate/migrations/20180806102513-drop-twilio-voice-callbacks.sql @@ -0,0 +1,13 @@ + +-- +migrate Up +DROP TABLE twilio_voice_callbacks; + +-- +migrate Down +CREATE TABLE twilio_voice_callbacks ( + phone_number text, + callback_id uuid NOT NULL, + code integer NOT NULL, + description text NOT NULL DEFAULT ''::text, + twilio_sid text, + CONSTRAINT twilio_voice_callbacks_pkey PRIMARY KEY (phone_number, twilio_sid) +); diff --git a/migrate/migrations/20180806102620-drop-user-notification-cycles.sql b/migrate/migrations/20180806102620-drop-user-notification-cycles.sql new file mode 100644 index 0000000000..7f46899a47 --- /dev/null +++ b/migrate/migrations/20180806102620-drop-user-notification-cycles.sql @@ -0,0 +1,13 @@ + +-- +migrate Up +DROP TABLE user_notification_cycles; + +-- +migrate Down +CREATE TABLE 
user_notification_cycles ( + id uuid NOT NULL DEFAULT gen_random_uuid() UNIQUE, + user_id uuid REFERENCES users(id) ON DELETE CASCADE, + alert_id bigint REFERENCES alerts(id) ON DELETE CASCADE, + escalation_level integer NOT NULL, + started_at timestamp with time zone NOT NULL DEFAULT now(), + CONSTRAINT user_notification_cycles_pkey PRIMARY KEY (user_id, alert_id) +); diff --git a/migrate/migrations/20180806102708-drop-auth-github-users.sql b/migrate/migrations/20180806102708-drop-auth-github-users.sql new file mode 100644 index 0000000000..74848b84f1 --- /dev/null +++ b/migrate/migrations/20180806102708-drop-auth-github-users.sql @@ -0,0 +1,30 @@ + +-- +migrate Up + +DROP TABLE auth_github_users; +DROP FUNCTION fn_insert_github_user(); + +-- +migrate Down +CREATE TABLE auth_github_users ( + user_id uuid REFERENCES users(id) ON DELETE CASCADE PRIMARY KEY, + github_id text NOT NULL UNIQUE +); +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_insert_github_user() RETURNS TRIGGER AS +$$ +BEGIN + + INSERT INTO auth_subjects (provider_id, subject_id, user_id) + VALUES ('github', NEW.github_id::text, NEW.user_id) + ON CONFLICT (provider_id, subject_id) DO UPDATE + SET user_id = NEW.user_id; + + RETURN NEW; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_insert_github_user +AFTER INSERT ON auth_github_users +FOR EACH ROW +EXECUTE PROCEDURE fn_insert_github_user(); diff --git a/migrate/migrations/20180806102923-drop-auth-token-codes.sql b/migrate/migrations/20180806102923-drop-auth-token-codes.sql new file mode 100644 index 0000000000..90704e631b --- /dev/null +++ b/migrate/migrations/20180806102923-drop-auth-token-codes.sql @@ -0,0 +1,11 @@ + +-- +migrate Up +DROP TABLE auth_token_codes; + +-- +migrate Down +CREATE TABLE auth_token_codes ( + id uuid DEFAULT gen_random_uuid() PRIMARY KEY, + user_id uuid NOT NULL UNIQUE REFERENCES users(id), + expires_at timestamp without time zone NOT NULL DEFAULT (now() + '00:05:00'::interval), + 
user_agent text NOT NULL +); diff --git a/migrate/migrations/20180816094955-switchover-state.sql b/migrate/migrations/20180816094955-switchover-state.sql new file mode 100644 index 0000000000..52c122d047 --- /dev/null +++ b/migrate/migrations/20180816094955-switchover-state.sql @@ -0,0 +1,21 @@ + +-- +migrate Up +CREATE TYPE enum_switchover_state as ENUM ( + 'idle', + 'in_progress', + 'use_next_db' +); + +CREATE TABLE switchover_state ( + ok BOOL PRIMARY KEY, + current_state enum_switchover_state NOT NULL, + CHECK(ok) +); + +INSERT INTO switchover_state (ok, current_state) +VALUES (true, 'idle'); + +-- +migrate Down + +DROP TABLE switchover_state; +DROP TYPE enum_switchover_state; \ No newline at end of file diff --git a/migrate/migrations/20180816095055-add-row-ids.sql b/migrate/migrations/20180816095055-add-row-ids.sql new file mode 100644 index 0000000000..97f6d88c34 --- /dev/null +++ b/migrate/migrations/20180816095055-add-row-ids.sql @@ -0,0 +1,71 @@ + +-- +migrate Up notransaction + +ALTER TABLE auth_basic_users +DROP COLUMN IF EXISTS id, +ADD COLUMN id BIGSERIAL, +ADD CONSTRAINT auth_basic_users_uniq_id UNIQUE(id); + +ALTER TABLE auth_subjects +DROP COLUMN IF EXISTS id, +ADD COLUMN id BIGSERIAL, +ADD CONSTRAINT auth_subjects_uniq_id UNIQUE(id); + +ALTER TABLE ep_step_on_call_users +DROP COLUMN IF EXISTS id, +ADD COLUMN id BIGSERIAL, +ADD CONSTRAINT ep_step_on_call_users_uniq_id UNIQUE(id); + +ALTER TABLE escalation_policy_state +DROP COLUMN IF EXISTS id, +ADD COLUMN id BIGSERIAL, +ADD CONSTRAINT escalation_policy_state_uniq_id UNIQUE(id); + +ALTER TABLE rotation_state +DROP COLUMN IF EXISTS id, +ADD COLUMN id BIGSERIAL, +ADD CONSTRAINT rotation_state_uniq_id UNIQUE(id); + +ALTER TABLE schedule_on_call_users +DROP COLUMN IF EXISTS id, +ADD COLUMN id BIGSERIAL, +ADD CONSTRAINT schedule_on_call_users_uniq_id UNIQUE(id); + +ALTER TABLE twilio_sms_callbacks +DROP COLUMN IF EXISTS id, +ADD COLUMN id BIGSERIAL, +ADD CONSTRAINT twilio_sms_callbacks_uniq_id 
UNIQUE(id); + +ALTER TABLE twilio_sms_errors +DROP COLUMN IF EXISTS id, +ADD COLUMN id BIGSERIAL, +ADD CONSTRAINT twilio_sms_errors_uniq_id UNIQUE(id); + +ALTER TABLE twilio_voice_errors +DROP COLUMN IF EXISTS id, +ADD COLUMN id BIGSERIAL, +ADD CONSTRAINT twilio_voice_errors_uniq_id UNIQUE(id); + +ALTER TABLE user_favorites +DROP COLUMN IF EXISTS id, +ADD COLUMN id BIGSERIAL, +ADD CONSTRAINT user_favorites_uniq_id UNIQUE(id); + +ALTER TABLE user_last_alert_log +DROP COLUMN IF EXISTS id, +ADD COLUMN id BIGSERIAL, +ADD CONSTRAINT user_last_alert_log_uniq_id UNIQUE(id); + +-- +migrate Down notransaction + +ALTER TABLE auth_basic_users DROP COLUMN IF EXISTS id; +ALTER TABLE auth_subjects DROP COLUMN IF EXISTS id; +ALTER TABLE ep_step_on_call_users DROP COLUMN IF EXISTS id; +ALTER TABLE escalation_policy_state DROP COLUMN IF EXISTS id; +ALTER TABLE rotation_state DROP COLUMN IF EXISTS id; +ALTER TABLE schedule_on_call_users DROP COLUMN IF EXISTS id; +ALTER TABLE twilio_sms_callbacks DROP COLUMN IF EXISTS id; +ALTER TABLE twilio_sms_errors DROP COLUMN IF EXISTS id; +ALTER TABLE twilio_voice_errors DROP COLUMN IF EXISTS id; +ALTER TABLE user_favorites DROP COLUMN IF EXISTS id; +ALTER TABLE user_last_alert_log DROP COLUMN IF EXISTS id; diff --git a/migrate/migrations/20180816095155-change-log.sql b/migrate/migrations/20180816095155-change-log.sql new file mode 100644 index 0000000000..dc43ae9227 --- /dev/null +++ b/migrate/migrations/20180816095155-change-log.sql @@ -0,0 +1,399 @@ + +-- +migrate Up notransaction + +-- +migrate StatementBegin +BEGIN; + +DROP TABLE IF EXISTS change_log; +CREATE TABLE IF NOT EXISTS change_log ( + id BIGSERIAL PRIMARY KEY, + op TEXT NOT NULL, + table_name TEXT NOT NULL, + row_id TEXT NOT NULL, + tx_id BIGINT, + cmd_id cid, + row_data JSONB +); + + +CREATE OR REPLACE FUNCTION process_change() RETURNS TRIGGER AS $$ +DECLARE + cur_state enum_switchover_state := 'idle'; +BEGIN + SELECT INTO cur_state current_state + FROM switchover_state; + + IF 
cur_state != 'in_progress' THEN + RETURN NEW; + END IF; + + IF (TG_OP = 'DELETE') THEN + INSERT INTO change_log (op, table_name, row_id, tx_id, cmd_id) + VALUES (TG_OP, TG_TABLE_NAME, cast(OLD.id as TEXT), txid_current(), OLD.cmax); + RETURN OLD; + ELSE + INSERT INTO change_log (op, table_name, row_id, tx_id, cmd_id, row_data) + VALUES (TG_OP, TG_TABLE_NAME, cast(NEW.id as TEXT), txid_current(), NEW.cmin, to_jsonb(NEW)); + RETURN NEW; + END IF; + + RETURN NULL; +END; +$$ LANGUAGE 'plpgsql'; +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_alert_logs_change_log ON alert_logs; +CREATE TRIGGER zz_99_alert_logs_change_log +AFTER INSERT OR UPDATE OR DELETE ON alert_logs +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_alerts_change_log ON alerts; +CREATE TRIGGER zz_99_alerts_change_log +AFTER INSERT OR UPDATE OR DELETE ON alerts +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_auth_basic_users_change_log ON auth_basic_users; +CREATE TRIGGER zz_99_auth_basic_users_change_log +AFTER INSERT OR UPDATE OR DELETE ON auth_basic_users +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_auth_nonce_change_log ON auth_nonce; +CREATE TRIGGER zz_99_auth_nonce_change_log +AFTER INSERT OR UPDATE OR DELETE ON auth_nonce +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_auth_subjects_change_log ON auth_subjects; +CREATE TRIGGER zz_99_auth_subjects_change_log +AFTER INSERT OR UPDATE OR DELETE ON auth_subjects +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; 
+DROP TRIGGER IF EXISTS zz_99_auth_user_sessions_change_log ON auth_user_sessions; +CREATE TRIGGER zz_99_auth_user_sessions_change_log +AFTER INSERT OR UPDATE OR DELETE ON auth_user_sessions +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_config_limits_change_log ON config_limits; +CREATE TRIGGER zz_99_config_limits_change_log +AFTER INSERT OR UPDATE OR DELETE ON config_limits +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_ep_step_on_call_users_change_log ON ep_step_on_call_users; +CREATE TRIGGER zz_99_ep_step_on_call_users_change_log +AFTER INSERT OR UPDATE OR DELETE ON ep_step_on_call_users +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_escalation_policies_change_log ON escalation_policies; +CREATE TRIGGER zz_99_escalation_policies_change_log +AFTER INSERT OR UPDATE OR DELETE ON escalation_policies +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_escalation_policy_actions_change_log ON escalation_policy_actions; +CREATE TRIGGER zz_99_escalation_policy_actions_change_log +AFTER INSERT OR UPDATE OR DELETE ON escalation_policy_actions +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_escalation_policy_state_change_log ON escalation_policy_state; +CREATE TRIGGER zz_99_escalation_policy_state_change_log +AFTER INSERT OR UPDATE OR DELETE ON escalation_policy_state +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_escalation_policy_steps_change_log ON 
escalation_policy_steps; +CREATE TRIGGER zz_99_escalation_policy_steps_change_log +AFTER INSERT OR UPDATE OR DELETE ON escalation_policy_steps +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_heartbeat_monitors_change_log ON heartbeat_monitors; +CREATE TRIGGER zz_99_heartbeat_monitors_change_log +AFTER INSERT OR UPDATE OR DELETE ON heartbeat_monitors +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_integration_keys_change_log ON integration_keys; +CREATE TRIGGER zz_99_integration_keys_change_log +AFTER INSERT OR UPDATE OR DELETE ON integration_keys +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_keyring_change_log ON keyring; +CREATE TRIGGER zz_99_keyring_change_log +AFTER INSERT OR UPDATE OR DELETE ON keyring +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_notification_policy_cycles_change_log ON notification_policy_cycles; +CREATE TRIGGER zz_99_notification_policy_cycles_change_log +AFTER INSERT OR UPDATE OR DELETE ON notification_policy_cycles +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_outgoing_messages_change_log ON outgoing_messages; +CREATE TRIGGER zz_99_outgoing_messages_change_log +AFTER INSERT OR UPDATE OR DELETE ON outgoing_messages +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_region_ids_change_log ON region_ids; +CREATE TRIGGER zz_99_region_ids_change_log +AFTER INSERT OR UPDATE OR DELETE ON region_ids +FOR EACH ROW EXECUTE 
PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_rotation_participants_change_log ON rotation_participants; +CREATE TRIGGER zz_99_rotation_participants_change_log +AFTER INSERT OR UPDATE OR DELETE ON rotation_participants +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_rotation_state_change_log ON rotation_state; +CREATE TRIGGER zz_99_rotation_state_change_log +AFTER INSERT OR UPDATE OR DELETE ON rotation_state +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_rotations_change_log ON rotations; +CREATE TRIGGER zz_99_rotations_change_log +AFTER INSERT OR UPDATE OR DELETE ON rotations +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_schedule_on_call_users_change_log ON schedule_on_call_users; +CREATE TRIGGER zz_99_schedule_on_call_users_change_log +AFTER INSERT OR UPDATE OR DELETE ON schedule_on_call_users +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_schedule_rules_change_log ON schedule_rules; +CREATE TRIGGER zz_99_schedule_rules_change_log +AFTER INSERT OR UPDATE OR DELETE ON schedule_rules +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_schedules_change_log ON schedules; +CREATE TRIGGER zz_99_schedules_change_log +AFTER INSERT OR UPDATE OR DELETE ON schedules +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_services_change_log ON services; +CREATE TRIGGER 
zz_99_services_change_log +AFTER INSERT OR UPDATE OR DELETE ON services +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_twilio_sms_callbacks_change_log ON twilio_sms_callbacks; +CREATE TRIGGER zz_99_twilio_sms_callbacks_change_log +AFTER INSERT OR UPDATE OR DELETE ON twilio_sms_callbacks +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_twilio_sms_errors_change_log ON twilio_sms_errors; +CREATE TRIGGER zz_99_twilio_sms_errors_change_log +AFTER INSERT OR UPDATE OR DELETE ON twilio_sms_errors +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_twilio_voice_errors_change_log ON twilio_voice_errors; +CREATE TRIGGER zz_99_twilio_voice_errors_change_log +AFTER INSERT OR UPDATE OR DELETE ON twilio_voice_errors +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_user_contact_methods_change_log ON user_contact_methods; +CREATE TRIGGER zz_99_user_contact_methods_change_log +AFTER INSERT OR UPDATE OR DELETE ON user_contact_methods +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_user_favorites_change_log ON user_favorites; +CREATE TRIGGER zz_99_user_favorites_change_log +AFTER INSERT OR UPDATE OR DELETE ON user_favorites +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_user_last_alert_log_change_log ON user_last_alert_log; +CREATE TRIGGER zz_99_user_last_alert_log_change_log +AFTER INSERT OR UPDATE OR DELETE ON user_last_alert_log +FOR EACH ROW EXECUTE PROCEDURE 
process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_user_notification_rules_change_log ON user_notification_rules; +CREATE TRIGGER zz_99_user_notification_rules_change_log +AFTER INSERT OR UPDATE OR DELETE ON user_notification_rules +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_user_overrides_change_log ON user_overrides; +CREATE TRIGGER zz_99_user_overrides_change_log +AFTER INSERT OR UPDATE OR DELETE ON user_overrides +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_user_verification_codes_change_log ON user_verification_codes; +CREATE TRIGGER zz_99_user_verification_codes_change_log +AFTER INSERT OR UPDATE OR DELETE ON user_verification_codes +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_users_change_log ON users; +CREATE TRIGGER zz_99_users_change_log +AFTER INSERT OR UPDATE OR DELETE ON users +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate Down notransaction +DROP TRIGGER IF EXISTS zz_99_alert_logs_change_log ON alert_logs; +DROP TRIGGER IF EXISTS zz_99_alerts_change_log ON alerts; +DROP TRIGGER IF EXISTS zz_99_auth_basic_users_change_log ON auth_basic_users; +DROP TRIGGER IF EXISTS zz_99_auth_nonce_change_log ON auth_nonce; +DROP TRIGGER IF EXISTS zz_99_auth_subjects_change_log ON auth_subjects; +DROP TRIGGER IF EXISTS zz_99_auth_user_sessions_change_log ON auth_user_sessions; +DROP TRIGGER IF EXISTS zz_99_config_limits_change_log ON config_limits; +DROP TRIGGER IF EXISTS zz_99_ep_step_on_call_users_change_log ON ep_step_on_call_users; +DROP TRIGGER IF EXISTS zz_99_escalation_policies_change_log ON escalation_policies; +DROP 
TRIGGER IF EXISTS zz_99_escalation_policy_actions_change_log ON escalation_policy_actions; +DROP TRIGGER IF EXISTS zz_99_escalation_policy_state_change_log ON escalation_policy_state; +DROP TRIGGER IF EXISTS zz_99_escalation_policy_steps_change_log ON escalation_policy_steps; +DROP TRIGGER IF EXISTS zz_99_heartbeat_monitors_change_log ON heartbeat_monitors; +DROP TRIGGER IF EXISTS zz_99_integration_keys_change_log ON integration_keys; +DROP TRIGGER IF EXISTS zz_99_keyring_change_log ON keyring; +DROP TRIGGER IF EXISTS zz_99_notification_policy_cycles_change_log ON notification_policy_cycles; +DROP TRIGGER IF EXISTS zz_99_outgoing_messages_change_log ON outgoing_messages; +DROP TRIGGER IF EXISTS zz_99_region_ids_change_log ON region_ids; +DROP TRIGGER IF EXISTS zz_99_rotation_participants_change_log ON rotation_participants; +DROP TRIGGER IF EXISTS zz_99_rotation_state_change_log ON rotation_state; +DROP TRIGGER IF EXISTS zz_99_rotations_change_log ON rotations; +DROP TRIGGER IF EXISTS zz_99_schedule_on_call_users_change_log ON schedule_on_call_users; +DROP TRIGGER IF EXISTS zz_99_schedule_rules_change_log ON schedule_rules; +DROP TRIGGER IF EXISTS zz_99_schedules_change_log ON schedules; +DROP TRIGGER IF EXISTS zz_99_services_change_log ON services; +DROP TRIGGER IF EXISTS zz_99_twilio_sms_callbacks_change_log ON twilio_sms_callbacks; +DROP TRIGGER IF EXISTS zz_99_twilio_sms_errors_change_log ON twilio_sms_errors; +DROP TRIGGER IF EXISTS zz_99_twilio_voice_errors_change_log ON twilio_voice_errors; +DROP TRIGGER IF EXISTS zz_99_user_contact_methods_change_log ON user_contact_methods; +DROP TRIGGER IF EXISTS zz_99_user_favorites_change_log ON user_favorites; +DROP TRIGGER IF EXISTS zz_99_user_last_alert_log_change_log ON user_last_alert_log; +DROP TRIGGER IF EXISTS zz_99_user_notification_rules_change_log ON user_notification_rules; +DROP TRIGGER IF EXISTS zz_99_user_overrides_change_log ON user_overrides; +DROP TRIGGER IF EXISTS 
zz_99_user_verification_codes_change_log ON user_verification_codes; +DROP TRIGGER IF EXISTS zz_99_users_change_log ON users; + +DROP FUNCTION IF EXISTS process_change(); +DROP TABLE IF EXISTS change_log; diff --git a/migrate/migrations/20180816164203-drop-end-time-check.sql b/migrate/migrations/20180816164203-drop-end-time-check.sql new file mode 100644 index 0000000000..1741f6a7c6 --- /dev/null +++ b/migrate/migrations/20180816164203-drop-end-time-check.sql @@ -0,0 +1,7 @@ + +-- +migrate Up +ALTER TABLE user_overrides DROP CONSTRAINT IF EXISTS user_overrides_end_time_check; + +-- +migrate Down +ALTER TABLE user_overrides + ADD CONSTRAINT user_overrides_end_time_check CHECK ((end_time > now())); diff --git a/migrate/migrations/20180821150330-deferable-status-cm.sql b/migrate/migrations/20180821150330-deferable-status-cm.sql new file mode 100644 index 0000000000..38bcc0087e --- /dev/null +++ b/migrate/migrations/20180821150330-deferable-status-cm.sql @@ -0,0 +1,8 @@ + +-- +migrate Up +ALTER TABLE users + ALTER CONSTRAINT users_alert_status_log_contact_method_id_fkey DEFERRABLE; + +-- +migrate Down +ALTER TABLE users + ALTER CONSTRAINT users_alert_status_log_contact_method_id_fkey NOT DEFERRABLE; diff --git a/migrate/migrations/20180822153707-defer-rotation-state.sql b/migrate/migrations/20180822153707-defer-rotation-state.sql new file mode 100644 index 0000000000..658c2a6c32 --- /dev/null +++ b/migrate/migrations/20180822153707-defer-rotation-state.sql @@ -0,0 +1,10 @@ + +-- +migrate Up +ALTER TABLE rotation_state + DROP CONSTRAINT rotation_state_rotation_participant_id_fkey, + ADD CONSTRAINT rotation_state_rotation_participant_id_fkey FOREIGN KEY (rotation_participant_id) REFERENCES rotation_participants (id) ON DELETE NO ACTION DEFERRABLE; + +-- +migrate Down +ALTER TABLE rotation_state + DROP CONSTRAINT rotation_state_rotation_participant_id_fkey, + ADD CONSTRAINT rotation_state_rotation_participant_id_fkey FOREIGN KEY (rotation_participant_id) REFERENCES 
rotation_participants (id) ON DELETE RESTRICT; diff --git a/migrate/migrations/20180822153914-defer-ep-state.sql b/migrate/migrations/20180822153914-defer-ep-state.sql new file mode 100644 index 0000000000..f074315a2b --- /dev/null +++ b/migrate/migrations/20180822153914-defer-ep-state.sql @@ -0,0 +1,8 @@ + +-- +migrate Up +ALTER TABLE escalation_policy_state + ALTER CONSTRAINT svc_ep_fkey DEFERRABLE; + +-- +migrate Down +ALTER TABLE escalation_policy_state + ALTER CONSTRAINT svc_ep_fkey NOT DEFERRABLE; diff --git a/migrate/migrations/20180831132457-user-last-alert-log-indexes.sql b/migrate/migrations/20180831132457-user-last-alert-log-indexes.sql new file mode 100644 index 0000000000..a200d02348 --- /dev/null +++ b/migrate/migrations/20180831132457-user-last-alert-log-indexes.sql @@ -0,0 +1,15 @@ + +-- +migrate Up notransaction +drop index if exists idx_ulal_log_id; +create index concurrently idx_ulal_log_id on user_last_alert_log (log_id); + +drop index if exists idx_ulal_next_log_id; +create index concurrently idx_ulal_next_log_id on user_last_alert_log (next_log_id); + +drop index if exists idx_ulal_alert_id; +create index concurrently idx_ulal_alert_id on user_last_alert_log (alert_id); + +-- +migrate Down notransaction +drop index if exists idx_ulal_log_id; +drop index if exists idx_ulal_next_log_id; +drop index if exists idx_ulal_alert_id; diff --git a/migrate/migrations/20180831132707-alerts-service-index.sql b/migrate/migrations/20180831132707-alerts-service-index.sql new file mode 100644 index 0000000000..4ac25e5825 --- /dev/null +++ b/migrate/migrations/20180831132707-alerts-service-index.sql @@ -0,0 +1,7 @@ + +-- +migrate Up notransaction +drop index if exists idx_alert_service_id; +create index concurrently idx_alert_service_id on alerts (service_id); + +-- +migrate Down notransaction +drop index if exists idx_alert_service_id; diff --git a/migrate/migrations/20180831132743-np-cycle-alert-index.sql 
b/migrate/migrations/20180831132743-np-cycle-alert-index.sql new file mode 100644 index 0000000000..3b1c9d2c1b --- /dev/null +++ b/migrate/migrations/20180831132743-np-cycle-alert-index.sql @@ -0,0 +1,7 @@ + +-- +migrate Up notransaction +drop index if exists idx_np_cycle_alert_id; +create index concurrently idx_np_cycle_alert_id on notification_policy_cycles (alert_id); + +-- +migrate Down notransaction +drop index if exists idx_np_cycle_alert_id; diff --git a/migrate/migrations/20180831132927-alert-logs-index.sql b/migrate/migrations/20180831132927-alert-logs-index.sql new file mode 100644 index 0000000000..e969da8938 --- /dev/null +++ b/migrate/migrations/20180831132927-alert-logs-index.sql @@ -0,0 +1,21 @@ + +-- +migrate Up notransaction + +drop index if exists idx_alert_logs_alert_id; +create index concurrently idx_alert_logs_alert_id on alert_logs (alert_id); + + +drop index if exists idx_alert_logs_hb_id; +create index concurrently idx_alert_logs_hb_id on alert_logs (sub_hb_monitor_id); + +drop index if exists idx_alert_logs_int_id; +create index concurrently idx_alert_logs_int_id on alert_logs (sub_integration_key_id); + +drop index if exists idx_alert_logs_user_id; +create index concurrently idx_alert_logs_user_id on alert_logs (sub_user_id); + +-- +migrate Down notransaction +drop index if exists idx_alert_logs_alert_id; +drop index if exists idx_alert_logs_hb_id; +drop index if exists idx_alert_logs_int_id; +drop index if exists idx_alert_logs_user_id; diff --git a/migrate/migrations/20180831143308-outgoing-messages-index.sql b/migrate/migrations/20180831143308-outgoing-messages-index.sql new file mode 100644 index 0000000000..f56448d281 --- /dev/null +++ b/migrate/migrations/20180831143308-outgoing-messages-index.sql @@ -0,0 +1,13 @@ + +-- +migrate Up notransaction + +drop index if exists idx_om_alert_log_id; +create index concurrently idx_om_alert_log_id on outgoing_messages (alert_log_id); + +drop index if exists idx_om_vcode_id; +create index 
concurrently idx_om_vcode_id on outgoing_messages (user_verification_code_id); + +-- +migrate Down notransaction + +drop index if exists idx_om_alert_log_id; +drop index if exists idx_om_vcode_id; diff --git a/migrate/migrations/20180907111203-schedule-rule-endtime-fix.sql b/migrate/migrations/20180907111203-schedule-rule-endtime-fix.sql new file mode 100644 index 0000000000..f6b5fb9c40 --- /dev/null +++ b/migrate/migrations/20180907111203-schedule-rule-endtime-fix.sql @@ -0,0 +1,34 @@ + +-- +migrate Up +UPDATE engine_processing_versions +SET "version" = 2 +WHERE type_id = 'schedule'; + +LOCK schedule_rules IN EXCLUSIVE MODE; + +UPDATE schedule_rules +SET + end_time = cast(date_trunc('minute', end_time)+'1 minute'::interval as time without time zone), + start_time = date_trunc('minute', start_time) +; + +UPDATE schedule_rules +SET end_time = cast(end_time-'1 minute'::interval as time without time zone) +WHERE date_part('minute', end_time)::integer % 5 = 1; + +-- +migrate Down +SELECT 1 +FROM engine_processing_versions +WHERE type_id = 'schedule' +FOR UPDATE; + +LOCK schedule_rules IN EXCLUSIVE MODE; + +UPDATE schedule_rules +SET + end_time = cast(date_trunc('minute', end_time)-'1 minute'::interval as time without time zone) +; + +UPDATE engine_processing_versions +SET "version" = 1 +WHERE type_id = 'schedule'; diff --git a/migrate/migrations/20180918102226-add-service-label.sql b/migrate/migrations/20180918102226-add-service-label.sql new file mode 100644 index 0000000000..e5ea9cd240 --- /dev/null +++ b/migrate/migrations/20180918102226-add-service-label.sql @@ -0,0 +1,14 @@ +-- +migrate Up +CREATE TABLE labels ( + id BIGSERIAL PRIMARY KEY, + tgt_service_id UUID NOT NULL REFERENCES services(id) ON DELETE CASCADE, + key TEXT NOT NULL, + value TEXT NOT NULL, + UNIQUE (tgt_service_id, key) +); + +CREATE INDEX idx_labels_service_id ON labels (tgt_service_id); + +-- +migrate Down +DROP INDEX idx_labels_service_id; +DROP TABLE labels; \ No newline at end of file diff 
--git a/migrate/migrations/20181004032148-labels-switchover-trigger.sql b/migrate/migrations/20181004032148-labels-switchover-trigger.sql new file mode 100644 index 0000000000..bfd8e5f8a8 --- /dev/null +++ b/migrate/migrations/20181004032148-labels-switchover-trigger.sql @@ -0,0 +1,15 @@ + +-- +migrate Up notransaction + +-- +migrate StatementBegin +BEGIN; +DROP TRIGGER IF EXISTS zz_99_labels_change_log ON labels; +CREATE TRIGGER zz_99_labels_change_log +AFTER INSERT OR UPDATE OR DELETE ON labels +FOR EACH ROW EXECUTE PROCEDURE process_change(); +COMMIT; +-- +migrate StatementEnd + +-- +migrate Down notransaction + +DROP TRIGGER IF EXISTS zz_99_labels_change_log ON labels; diff --git a/migrate/migrations/20181004145558-fix-deleting-participants.sql b/migrate/migrations/20181004145558-fix-deleting-participants.sql new file mode 100644 index 0000000000..1b25a76909 --- /dev/null +++ b/migrate/migrations/20181004145558-fix-deleting-participants.sql @@ -0,0 +1,91 @@ +-- +migrate Up + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_advance_or_end_rot_on_part_del() RETURNS TRIGGER AS +$$ +DECLARE + new_part UUID; + active_part UUID; +BEGIN + + SELECT rotation_participant_id + INTO active_part + FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + + IF active_part != OLD.id THEN + RETURN OLD; + END IF; + + IF (select 1 from rotations where id = OLD.rotation_id) != 1 THEN + DELETE FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + END IF; + + SELECT id + INTO new_part + FROM rotation_participants + WHERE + rotation_id = OLD.rotation_id AND + id != OLD.id AND + position IN (0, OLD.position+1) + ORDER BY position DESC + LIMIT 1; + + IF new_part ISNULL THEN + DELETE FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + ELSE + UPDATE rotation_state + SET rotation_participant_id = new_part + WHERE rotation_id = OLD.rotation_id; + END IF; + + RETURN OLD; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +-- +migrate Down + +-- 
+migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_advance_or_end_rot_on_part_del() RETURNS TRIGGER AS +$$ +DECLARE + new_part UUID; + active_part UUID; +BEGIN + + SELECT rotation_participant_id + INTO active_part + FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + + IF active_part != OLD.id THEN + RETURN OLD; + END IF; + + SELECT id + INTO new_part + FROM rotation_participants + WHERE + rotation_id = OLD.rotation_id AND + id != OLD.id AND + position IN (0, OLD.position+1) + ORDER BY position DESC + LIMIT 1; + + IF new_part ISNULL THEN + DELETE FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + ELSE + UPDATE rotation_state + SET rotation_participant_id = new_part + WHERE rotation_id = OLD.rotation_id; + END IF; + + RETURN OLD; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd diff --git a/migrate/migrations/20181008111401-twilio-sms-short-reply.sql b/migrate/migrations/20181008111401-twilio-sms-short-reply.sql new file mode 100644 index 0000000000..c3f11de781 --- /dev/null +++ b/migrate/migrations/20181008111401-twilio-sms-short-reply.sql @@ -0,0 +1,52 @@ + +-- +migrate Up +UPDATE engine_processing_versions +SET version = 4 +WHERE type_id = 'message'; + +ALTER TABLE twilio_sms_callbacks + ADD COLUMN sent_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + ADD COLUMN alert_id BIGINT REFERENCES alerts (id) ON DELETE CASCADE, + DROP COLUMN twilio_sid; + +CREATE INDEX idx_twilio_sms_alert_id ON twilio_sms_callbacks (alert_id); + +-- cleanup old codes +DELETE FROM twilio_sms_callbacks +WHERE code NOT IN + ( + SELECT id + FROM alerts + WHERE + status != 'closed' OR + created_at > now() - '1 day'::interval + ); + +-- cleanup duplicate codes +DELETE FROM twilio_sms_callbacks +WHERE id NOT IN ( + SELECT max(id) max_id + FROM twilio_sms_callbacks + GROUP BY phone_number, code +); + + +UPDATE twilio_sms_callbacks +SET alert_id = code; + +ALTER TABLE twilio_sms_callbacks + ALTER COLUMN alert_id SET NOT NULL; + +CREATE UNIQUE INDEX 
idx_twilio_sms_codes ON twilio_sms_callbacks (phone_number, code); + +-- +migrate Down +UPDATE engine_processing_versions +SET version = 3 +WHERE type_id = 'message'; + +ALTER TABLE twilio_sms_callbacks + DROP COLUMN sent_at, + DROP COLUMN alert_id, + ADD COLUMN twilio_sid TEXT NOT NULL; + +DROP INDEX idx_twilio_sms_codes; diff --git a/migrate/migrations/20181018131939-fix-rotation-deletions.sql b/migrate/migrations/20181018131939-fix-rotation-deletions.sql new file mode 100644 index 0000000000..8cddb29b0e --- /dev/null +++ b/migrate/migrations/20181018131939-fix-rotation-deletions.sql @@ -0,0 +1,99 @@ +-- +migrate Up + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_advance_or_end_rot_on_part_del() RETURNS TRIGGER AS +$$ +DECLARE + new_part UUID; + active_part UUID; +BEGIN + + SELECT rotation_participant_id + INTO active_part + FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + + IF active_part != OLD.id THEN + RETURN OLD; + END IF; + + IF OLD.rotation_id NOT IN ( + SELECT id FROM rotations + ) THEN + DELETE FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + END IF; + + SELECT id + INTO new_part + FROM rotation_participants + WHERE + rotation_id = OLD.rotation_id AND + id != OLD.id AND + position IN (0, OLD.position+1) + ORDER BY position DESC + LIMIT 1; + + IF new_part ISNULL THEN + DELETE FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + ELSE + UPDATE rotation_state + SET rotation_participant_id = new_part + WHERE rotation_id = OLD.rotation_id; + END IF; + + RETURN OLD; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + + +-- +migrate Down + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_advance_or_end_rot_on_part_del() RETURNS TRIGGER AS +$$ +DECLARE + new_part UUID; + active_part UUID; +BEGIN + + SELECT rotation_participant_id + INTO active_part + FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + + IF active_part != OLD.id THEN + RETURN OLD; + END IF; + + IF (select 1 from rotations where 
id = OLD.rotation_id) != 1 THEN + DELETE FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + END IF; + + SELECT id + INTO new_part + FROM rotation_participants + WHERE + rotation_id = OLD.rotation_id AND + id != OLD.id AND + position IN (0, OLD.position+1) + ORDER BY position DESC + LIMIT 1; + + IF new_part ISNULL THEN + DELETE FROM rotation_state + WHERE rotation_id = OLD.rotation_id; + ELSE + UPDATE rotation_state + SET rotation_participant_id = new_part + WHERE rotation_id = OLD.rotation_id; + END IF; + + RETURN OLD; +END; +$$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + diff --git a/migrate/migrations/20181107133329-notification-channels.sql b/migrate/migrations/20181107133329-notification-channels.sql new file mode 100644 index 0000000000..5a418c52b8 --- /dev/null +++ b/migrate/migrations/20181107133329-notification-channels.sql @@ -0,0 +1,20 @@ + +-- +migrate Up + +CREATE TYPE enum_notif_channel_type AS ENUM ( + 'SLACK' +); + +CREATE TABLE notification_channels ( + id UUID PRIMARY KEY, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + type enum_notif_channel_type NOT NULL, + name TEXT NOT NULL, + value TEXT NOT NULL, + meta jsonb +); + +-- +migrate Down + +DROP TABLE notification_channels; +DROP TYPE enum_notif_channel_type; diff --git a/migrate/migrations/20181107155035-nc-id-to-ep-action.sql b/migrate/migrations/20181107155035-nc-id-to-ep-action.sql new file mode 100644 index 0000000000..9fa2081c3f --- /dev/null +++ b/migrate/migrations/20181107155035-nc-id-to-ep-action.sql @@ -0,0 +1,19 @@ + +-- +migrate Up + +ALTER TABLE escalation_policy_actions + ADD COLUMN channel_id UUID REFERENCES notification_channels (id) ON DELETE CASCADE, + DROP CONSTRAINT epa_there_can_only_be_one, + ADD CONSTRAINT epa_there_can_only_be_one CHECK ( + (case when user_id notnull then 1 else 0 end + + case when schedule_id notnull then 1 else 0 end + + case when rotation_id notnull then 1 else 0 end + + case when channel_id notnull then 1 else 0 end) = 1 + 
), + ADD CONSTRAINT epa_no_duplicate_channels UNIQUE (escalation_policy_step_id, channel_id); + +-- +migrate Down + +ALTER TABLE escalation_policy_actions + DROP COLUMN channel_id, + ADD CONSTRAINT epa_there_can_only_be_one CHECK (user_id IS NULL AND schedule_id IS NULL AND rotation_id IS NOT NULL OR user_id IS NULL AND schedule_id IS NOT NULL AND rotation_id IS NULL OR user_id IS NOT NULL AND schedule_id IS NULL AND rotation_id IS NULL); diff --git a/migrate/migrations/20181107155229-om-notification-channel.sql b/migrate/migrations/20181107155229-om-notification-channel.sql new file mode 100644 index 0000000000..32ce27f414 --- /dev/null +++ b/migrate/migrations/20181107155229-om-notification-channel.sql @@ -0,0 +1,18 @@ + +-- +migrate Up +ALTER TABLE outgoing_messages + ADD COLUMN channel_id UUID REFERENCES notification_channels (id) ON DELETE CASCADE, + ALTER COLUMN user_id DROP NOT NULL, + ALTER COLUMN contact_method_id DROP NOT NULL, + ADD CONSTRAINT om_user_cm_or_channel CHECK( + (user_id notnull and contact_method_id notnull and channel_id isnull) + or + (channel_id notnull and contact_method_id isnull and user_id isnull) + ); +-- +migrate Down +ALTER TABLE outgoing_messages + DROP CONSTRAINT om_user_cm_or_channel, + DROP COLUMN channel_id, + ALTER COLUMN user_id SET NOT NULL, + ALTER COLUMN contact_method_id SET NOT NULL; + diff --git a/migrate/migrations/20190117130422-notif-chan-engine-versions.sql b/migrate/migrations/20190117130422-notif-chan-engine-versions.sql new file mode 100644 index 0000000000..ca9e2f3705 --- /dev/null +++ b/migrate/migrations/20190117130422-notif-chan-engine-versions.sql @@ -0,0 +1,21 @@ + +-- +migrate Up + +UPDATE engine_processing_versions +SET "version" = 5 +WHERE type_id = 'message'; + + +UPDATE engine_processing_versions +SET "version" = 3 +WHERE type_id = 'escalation'; + +-- +migrate Down + +UPDATE engine_processing_versions +SET "version" = 2 +WHERE type_id = 'escalation'; + +UPDATE engine_processing_versions +SET "version" 
= 4 +WHERE type_id = 'message'; diff --git a/migrate/migrations/20190129110250-add-cleanup-module.sql b/migrate/migrations/20190129110250-add-cleanup-module.sql new file mode 100644 index 0000000000..d31e745395 --- /dev/null +++ b/migrate/migrations/20190129110250-add-cleanup-module.sql @@ -0,0 +1,6 @@ +-- +migrate Up notransaction +ALTER TYPE engine_processing_type ADD VALUE IF NOT EXISTS 'cleanup'; +INSERT INTO engine_processing_versions (type_id) VALUES ('cleanup'); + +-- +migrate Down +DELETE FROM engine_processing_versions WHERE type_id = 'cleanup'; diff --git a/migrate/migrations/20190201104727-alert-logs-channel.sql b/migrate/migrations/20190201104727-alert-logs-channel.sql new file mode 100644 index 0000000000..6a7b3ced0d --- /dev/null +++ b/migrate/migrations/20190201104727-alert-logs-channel.sql @@ -0,0 +1,11 @@ + +-- +migrate Up notransaction +ALTER TYPE enum_alert_log_subject_type ADD VALUE IF NOT EXISTS 'channel'; + +ALTER TABLE alert_logs + ADD COLUMN sub_channel_id uuid REFERENCES notification_channels (id) ON DELETE SET NULL; +CREATE INDEX idx_alert_logs_channel_id ON alert_logs(sub_channel_id uuid_ops); + +-- +migrate Down + +ALTER TABLE alert_logs DROP COLUMN sub_channel_id; diff --git a/migrate/migrations/20190201142137-drop-sub-constraint.sql b/migrate/migrations/20190201142137-drop-sub-constraint.sql new file mode 100644 index 0000000000..76028490c5 --- /dev/null +++ b/migrate/migrations/20190201142137-drop-sub-constraint.sql @@ -0,0 +1,25 @@ + +-- +migrate Up +ALTER TABLE alert_logs + DROP CONSTRAINT alert_logs_one_subject; + +DROP TRIGGER trg_insert_alert_logs_user_last_alert ON alert_logs; +CREATE TRIGGER trg_insert_alert_logs_user_last_alert +AFTER INSERT +ON alert_logs +FOR EACH ROW +WHEN (NEW.event = 'notification_sent' AND NEW.sub_type = 'user') +EXECUTE PROCEDURE fn_insert_user_last_alert_log(); + + +-- +migrate Down +ALTER TABLE alert_logs + ADD CONSTRAINT alert_logs_one_subject CHECK (NOT (sub_user_id IS NOT NULL AND 
sub_integration_key_id IS NOT NULL AND sub_hb_monitor_id IS NOT NULL)); + +DROP TRIGGER trg_insert_alert_logs_user_last_alert ON alert_logs; +CREATE TRIGGER trg_insert_alert_logs_user_last_alert +AFTER INSERT +ON alert_logs +FOR EACH ROW +WHEN (NEW.event = 'notification_sent') +EXECUTE PROCEDURE fn_insert_user_last_alert_log(); diff --git a/migrate/migrations/20190225112925-config-table.sql b/migrate/migrations/20190225112925-config-table.sql new file mode 100644 index 0000000000..9fd1488744 --- /dev/null +++ b/migrate/migrations/20190225112925-config-table.sql @@ -0,0 +1,16 @@ + +-- +migrate Up +CREATE TABLE config ( + id SERIAL PRIMARY KEY, + schema INT NOT NULL, + data BYTEA NOT NULL, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now() +); + +CREATE TRIGGER zz_99_config_change_log +AFTER INSERT OR UPDATE OR DELETE ON config +FOR EACH ROW EXECUTE PROCEDURE process_change(); + +-- +migrate Down +DROP TRIGGER IF EXISTS zz_99_config_change_log ON config; +DROP TABLE config; diff --git a/migrate/migrations/20190312153204-slack-api-change.sql b/migrate/migrations/20190312153204-slack-api-change.sql new file mode 100644 index 0000000000..da7042f148 --- /dev/null +++ b/migrate/migrations/20190312153204-slack-api-change.sql @@ -0,0 +1,24 @@ + +-- +migrate Up + +UPDATE engine_processing_versions +SET "version" = 6 +WHERE type_id = 'message'; + +UPDATE notification_channels +SET + meta = jsonb_set(meta, '{webhookURL}', to_jsonb(value), true), + value = meta->>'chanID' +WHERE type = 'SLACK'; + +-- +migrate Down + +UPDATE notification_channels +SET + value = meta->>'webhookURL', + meta = meta - 'webhookURL' +WHERE type = 'SLACK'; + +UPDATE engine_processing_versions +SET "version" = 5 +WHERE type_id = 'message'; diff --git a/migrate/migrations/20190313125552-slack-user-link.sql b/migrate/migrations/20190313125552-slack-user-link.sql new file mode 100644 index 0000000000..f8dc295967 --- /dev/null +++ b/migrate/migrations/20190313125552-slack-user-link.sql @@ -0,0 
+1,11 @@ + +-- +migrate Up + +CREATE TABLE user_slack_data ( + id UUID NOT NULL PRIMARY KEY REFERENCES users (id) ON DELETE CASCADE, + access_token TEXT NOT NULL +); + +-- +migrate Down + +DROP TABLE user_slack_data; diff --git a/migrate/migrations/20190404105850-nc-no-meta.sql b/migrate/migrations/20190404105850-nc-no-meta.sql new file mode 100644 index 0000000000..23fa791b8d --- /dev/null +++ b/migrate/migrations/20190404105850-nc-no-meta.sql @@ -0,0 +1,12 @@ + +-- +migrate Up + +UPDATE notification_channels SET meta = '{}' WHERE meta isnull; +ALTER TABLE notification_channels + ALTER COLUMN meta SET DEFAULT '{}', + ALTER COLUMN meta SET NOT NULL; + +-- +migrate Down +ALTER TABLE notification_channels + ALTER COLUMN meta DROP NOT NULL, + ALTER COLUMN meta DROP DEFAULT; diff --git a/migrate/migrations/20190517144224-trigger-config-sync.sql b/migrate/migrations/20190517144224-trigger-config-sync.sql new file mode 100644 index 0000000000..a96e223c70 --- /dev/null +++ b/migrate/migrations/20190517144224-trigger-config-sync.sql @@ -0,0 +1,21 @@ +-- +migrate Up + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION fn_notify_config_refresh() RETURNS TRIGGER AS + $$ + BEGIN + NOTIFY "/goalert/config-refresh"; + RETURN NEW; + END; + $$ LANGUAGE 'plpgsql'; +-- +migrate StatementEnd + +CREATE TRIGGER trg_config_update + AFTER INSERT ON config + FOR EACH ROW + EXECUTE PROCEDURE fn_notify_config_refresh(); + +-- +migrate Down + +DROP TRIGGER trg_config_update ON config; +DROP FUNCTION fn_notify_config_refresh(); diff --git a/notification/alert.go b/notification/alert.go new file mode 100644 index 0000000000..bc64fe37d9 --- /dev/null +++ b/notification/alert.go @@ -0,0 +1,19 @@ +package notification + +// Alert represents outgoing notifications for alerts. 
+type Alert struct { + Dest Dest + CallbackID string // CallbackID is the identifier used to communicate a response to the notification + AlertID int // The global alert number + Summary string + Details string +} + +var _ Message = &Alert{} + +func (a Alert) Type() MessageType { return MessageTypeAlert } +func (a Alert) ID() string { return a.CallbackID } +func (a Alert) Destination() Dest { return a.Dest } +func (a Alert) Body() string { return a.Summary } +func (a Alert) ExtendedBody() string { return a.Details } +func (a Alert) SubjectID() int { return a.AlertID } diff --git a/notification/alertstatus.go b/notification/alertstatus.go new file mode 100644 index 0000000000..ad1b6f489c --- /dev/null +++ b/notification/alertstatus.go @@ -0,0 +1,17 @@ +package notification + +type AlertStatus struct { + Dest Dest + MessageID string + AlertID int + Log string +} + +var _ Message = &AlertStatus{} + +func (s AlertStatus) Type() MessageType { return MessageTypeAlertStatus } +func (s AlertStatus) ID() string { return s.MessageID } +func (s AlertStatus) Destination() Dest { return s.Dest } +func (s AlertStatus) Body() string { return s.Log } +func (s AlertStatus) ExtendedBody() string { return "" } +func (s AlertStatus) SubjectID() int { return s.AlertID } diff --git a/notification/dest.go b/notification/dest.go new file mode 100644 index 0000000000..790f385bb7 --- /dev/null +++ b/notification/dest.go @@ -0,0 +1,26 @@ +package notification + +//go:generate go run golang.org/x/tools/cmd/stringer -type DestType + +type Dest struct { + Type DestType + Value string +} + +type DestType int + +const ( + DestTypeUnknown DestType = iota + DestTypeVoice + DestTypeSMS + DestTypeSlackChannel +) + +// IsUserCM returns true if the DestType represents a user contact method. 
+func (t DestType) IsUserCM() bool { + switch t { + case DestTypeSMS, DestTypeVoice: + return true + } + return false +} diff --git a/notification/desttype_string.go b/notification/desttype_string.go new file mode 100644 index 0000000000..327d4e75eb --- /dev/null +++ b/notification/desttype_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type DestType"; DO NOT EDIT. + +package notification + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[DestTypeUnknown-0] + _ = x[DestTypeVoice-1] + _ = x[DestTypeSMS-2] + _ = x[DestTypeSlackChannel-3] +} + +const _DestType_name = "DestTypeUnknownDestTypeVoiceDestTypeSMSDestTypeSlackChannel" + +var _DestType_index = [...]uint8{0, 15, 28, 39, 59} + +func (i DestType) String() string { + if i < 0 || i >= DestType(len(_DestType_index)-1) { + return "DestType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _DestType_name[_DestType_index[i]:_DestType_index[i+1]] +} diff --git a/notification/manager.go b/notification/manager.go new file mode 100644 index 0000000000..db6a0a04a0 --- /dev/null +++ b/notification/manager.go @@ -0,0 +1,234 @@ +package notification + +import ( + "context" + "fmt" + "github.com/target/goalert/util/log" + "strings" + "sync" + + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// Manager is used as an intermediary between Senders and Receivers. +// It should be contstructed first (with NewManager()) and passed to +// Senders and Receivers that require it. +type Manager struct { + providers map[string]*namedSender + searchOrder []*namedSender + + r Receiver + mx *sync.RWMutex + + shutdownCh chan struct{} + shutdownWg sync.WaitGroup + + stubNotifiers bool +} + +var _ Sender = &Manager{} + +// NewManager initializes a new Manager. 
+func NewManager() *Manager { + return &Manager{ + mx: new(sync.RWMutex), + shutdownCh: make(chan struct{}), + providers: make(map[string]*namedSender), + } +} + +// SetStubNotifiers will cause all notifications senders to be stubbed out. +// +// This causes all notifications to be marked as delivered, but not actually sent. +func (mgr *Manager) SetStubNotifiers() { + mgr.stubNotifiers = true +} + +func bgSpan(ctx context.Context, name string) (context.Context, *trace.Span) { + var sp *trace.Span + if ctx != nil { + sp = trace.FromContext(ctx) + } + if sp == nil { + return trace.StartSpan(context.Background(), name) + } + + return trace.StartSpanWithRemoteParent(context.Background(), name, sp.SpanContext()) +} + +// Shutdown will stop the manager, waiting for pending background operations to finish. +func (m *Manager) Shutdown(context.Context) error { + close(m.shutdownCh) + m.shutdownWg.Wait() + return nil +} + +func (m *Manager) senderLoop(s *namedSender) { + defer m.shutdownWg.Done() + + handleResponse := func(resp *MessageResponse) { + ctx, sp := bgSpan(resp.Ctx, "NotificationManager.Response") + cpy := *resp + cpy.Ctx = ctx + + err := m.receive(ctx, s.name, &cpy) + sp.End() + resp.Err <- err + } + + for { + select { + case resp := <-s.ListenResponse(): + handleResponse(resp) + default: + } + + select { + case resp := <-s.ListenResponse(): + handleResponse(resp) + case stat := <-s.ListenStatus(): + ctx, sp := bgSpan(stat.Ctx, "NotificationManager.StatusUpdate") + m.updateStatus(ctx, stat.wrap(ctx, s)) + sp.End() + case <-m.shutdownCh: + return + } + } +} + +// Status will return the current status of a message. 
+func (m *Manager) Status(ctx context.Context, id, providerMsgID string) (*MessageStatus, error) { + parts := strings.SplitN(providerMsgID, ":", 2) + if len(parts) != 2 { + return nil, errors.Errorf("invalid provider message ID '%s'", providerMsgID) + } + + provider := m.providers[parts[0]] + if provider == nil { + return nil, errors.Errorf("unknown provider ID '%s'", parts[0]) + } + + ctx, sp := trace.StartSpan(ctx, "NotificationManager.Status") + sp.AddAttributes( + trace.StringAttribute("provider.id", parts[0]), + trace.StringAttribute("provider.message.id", parts[1]), + ) + defer sp.End() + stat, err := provider.Status(ctx, id, parts[1]) + if stat != nil { + stat = stat.wrap(ctx, provider) + } + return stat, err +} + +// RegisterSender will register a sender under a given DestType and name. +// A sender for the same name and type will replace an existing one, if any. +func (m *Manager) RegisterSender(t DestType, name string, s SendResponder) { + m.mx.Lock() + defer m.mx.Unlock() + + _, ok := m.providers[name] + if ok { + panic("name already taken") + } + if m.stubNotifiers { + // disable notification sending + s = stubSender{} + } + + n := &namedSender{name: name, SendResponder: s, destType: t} + m.providers[name] = n + m.searchOrder = append(m.searchOrder, n) + m.shutdownWg.Add(1) + go m.senderLoop(n) +} + +// UpdateStatus will update the status of a message. +func (m *Manager) updateStatus(ctx context.Context, status *MessageStatus) { + err := m.r.UpdateStatus(ctx, status) + if err != nil { + log.Log(ctx, errors.Wrap(err, "update message status")) + } +} + +// RegisterReceiver will set the given Receiver as the target for all Receive() calls. +// It will panic if called multiple times. +func (m *Manager) RegisterReceiver(r Receiver) { + if m.r != nil { + panic("tried to register a second Receiver") + } + m.r = r +} + +// Send implements the Sender interface by trying all registered senders for the type given +// in Notification. 
An error is returned if there are no registered senders for the type +// or if an error is returned from all of them. +func (m *Manager) Send(ctx context.Context, msg Message) (*MessageStatus, error) { + m.mx.RLock() + defer m.mx.RUnlock() + + destType := msg.Destination().Type + + ctx = log.WithFields(ctx, log.Fields{ + "ProviderType": destType, + "CallbackID": msg.ID(), + }) + if a, ok := msg.(Alert); ok { + ctx = log.WithField(ctx, "AlertID", a.AlertID) + } + + var tried bool + for _, s := range m.searchOrder { + if s.destType != destType { + continue + } + tried = true + + sendCtx := log.WithField(ctx, "ProviderName", s.name) + sendCtx, sp := trace.StartSpan(sendCtx, "NotificationManager.Send") + sp.AddAttributes( + trace.StringAttribute("provider.id", s.name), + trace.StringAttribute("message.type", msg.Type().String()), + trace.StringAttribute("message.id", msg.ID()), + ) + status, err := s.Send(sendCtx, msg) + sp.End() + if err != nil { + log.Log(sendCtx, errors.Wrap(err, "send notification")) + continue + } + log.Debugf(sendCtx, "notification sent") + // status already wrapped via namedSender + return status, nil + } + if !tried { + return nil, fmt.Errorf("no senders registered for type '%s'", destType) + } + + return nil, errors.New("all notification senders failed") +} + +func (m *Manager) receive(ctx context.Context, providerID string, resp *MessageResponse) error { + ctx, sp := trace.StartSpan(ctx, "NotificationManager.Receive") + defer sp.End() + sp.AddAttributes( + trace.StringAttribute("provider.id", providerID), + trace.StringAttribute("message.id", resp.ID), + trace.StringAttribute("dest.type", string(resp.From.Type)), + trace.StringAttribute("dest.value", resp.From.Value), + trace.StringAttribute("response", resp.Result.String()), + ) + log.Debugf(log.WithFields(ctx, log.Fields{ + "Result": resp.Result, + "CallbackID": resp.ID, + "ProviderID": providerID, + }), + "response received", + ) + if resp.Result == ResultStop { + return m.r.Stop(ctx, 
resp.From) + } + + return m.r.Receive(ctx, resp.ID, resp.Result) +} diff --git a/notification/message.go b/notification/message.go new file mode 100644 index 0000000000..1e171cf9ad --- /dev/null +++ b/notification/message.go @@ -0,0 +1,25 @@ +package notification + +//go:generate go run golang.org/x/tools/cmd/stringer -type MessageType + +// A Message contains information that can be provided +// to a user for notification. +type Message interface { + ID() string + Type() MessageType + Destination() Dest + SubjectID() int + Body() string + ExtendedBody() string +} + +// MessageType indicates the type of notification message. +type MessageType int + +// Allowed types +const ( + MessageTypeAlert MessageType = iota + MessageTypeAlertStatus + MessageTypeTest + MessageTypeVerification +) diff --git a/notification/messagestatus.go b/notification/messagestatus.go new file mode 100644 index 0000000000..202fe13b3b --- /dev/null +++ b/notification/messagestatus.go @@ -0,0 +1,69 @@ +package notification + +import "context" + +// MessageStatus represents the state of an outgoing message. +type MessageStatus struct { + // Ctx is the context of this status update (used for tracing if provided). + Ctx context.Context + + // ID is the GoAlert message ID. + ID string + + // ProviderMessageID is a string that represents the provider-specific ID of the message (e.g. Twilio SID). + ProviderMessageID string + + // State is the current state. + State MessageState + + // Details can contain any additional information about the State (e.g. "ringing", "no-answer" etc..). + Details string + + // Sequence can be used when the provider sends updates out-of order (e.g. Twilio). + // The Sequence number defaults to 0, and a status update is ignored unless it's + // Sequence number is >= the current one. 
+ Sequence int +} + +func (stat *MessageStatus) wrap(ctx context.Context, n *namedSender) *MessageStatus { + if stat == nil { + return nil + } + + s := *stat + if ctx != nil { + s.Ctx = ctx + } + s.ProviderMessageID = n.name + ":" + s.ProviderMessageID + return &s +} + +// MessageState represents the current state of an outgoing message. +type MessageState int + +const ( + // MessageStateActive should be specified when a message is still active. + // This includes things like remotely queued, ringing, or in-progress calls. + MessageStateActive MessageState = iota + + // MessageStateSent means the message has been sent completely, but may not + // have been delivered (or delivery confirmation is not supported.). For + // example, an SMS on the carrier network (but not device) or a voice call + // that rang but got `no-answer`. + MessageStateSent + + // MessageStateDelivered means the message is completed and was received + // by the end device. SMS delivery confirmation, or a voice call was + // completed (including if it was voice mail). + MessageStateDelivered + + // MessageStateFailedTemp should be set when a message was not sent (no SMS or ringing phone) + // but a subsequent try later may succeed. (e.g. voice call with busy signal). + MessageStateFailedTemp + + // MessageStateFailedPerm should be set when a message was not sent (no SMS or ringing phone) + // but a subsequent attempt will not be expected to succeed. For messages that fail due to + // invalid config, they should set this state, as without manual intervention, a retry + // will also fail. + MessageStateFailedPerm +) diff --git a/notification/messagetype_string.go b/notification/messagetype_string.go new file mode 100644 index 0000000000..5d2b8a149f --- /dev/null +++ b/notification/messagetype_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type MessageType"; DO NOT EDIT. 
+ +package notification + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[MessageTypeAlert-0] + _ = x[MessageTypeAlertStatus-1] + _ = x[MessageTypeTest-2] + _ = x[MessageTypeVerification-3] +} + +const _MessageType_name = "MessageTypeAlertMessageTypeAlertStatusMessageTypeTestMessageTypeVerification" + +var _MessageType_index = [...]uint8{0, 16, 38, 53, 76} + +func (i MessageType) String() string { + if i < 0 || i >= MessageType(len(_MessageType_index)-1) { + return "MessageType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _MessageType_name[_MessageType_index[i]:_MessageType_index[i+1]] +} diff --git a/notification/namedsender.go b/notification/namedsender.go new file mode 100644 index 0000000000..ce0b24c9cb --- /dev/null +++ b/notification/namedsender.go @@ -0,0 +1,16 @@ +package notification + +import ( + "context" +) + +type namedSender struct { + SendResponder + name string + destType DestType +} + +func (s *namedSender) Send(ctx context.Context, msg Message) (*MessageStatus, error) { + status, err := s.SendResponder.Send(ctx, msg) + return status.wrap(ctx, s), err +} diff --git a/notification/notifier.go b/notification/notifier.go new file mode 100644 index 0000000000..fac2dce493 --- /dev/null +++ b/notification/notifier.go @@ -0,0 +1,52 @@ +package notification + +//go:generate go run golang.org/x/tools/cmd/stringer -type Result + +import ( + "context" +) + +// Result specifies a response to a notification. +type Result int + +// Possible notification responses. +const ( + ResultAcknowledge Result = iota + ResultResolve + ResultStop +) + +// A Receiver is something that can process a notification result. 
+type Receiver interface { + UpdateStatus(context.Context, *MessageStatus) error + Receive(ctx context.Context, callbackID string, result Result) error + Stop(context.Context, Dest) error +} + +// A Sender is something that can send a notification. +type Sender interface { + + // Send should return nil if the notification was sent successfully. It should be expected + // that a returned error means that the notification should be attempted again. + Send(context.Context, Message) (*MessageStatus, error) + + Status(ctx context.Context, id, providerID string) (*MessageStatus, error) +} + +// A SendResponder can send messages and provide status and responses +type SendResponder interface { + Sender + + ListenStatus() <-chan *MessageStatus + ListenResponse() <-chan *MessageResponse +} + +// MessageResponse represents a received response from a user. +type MessageResponse struct { + Ctx context.Context + ID string + From Dest + Result Result + + Err chan error +} diff --git a/notification/result_string.go b/notification/result_string.go new file mode 100644 index 0000000000..a2b6a0e0d4 --- /dev/null +++ b/notification/result_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type Result"; DO NOT EDIT. + +package notification + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ResultAcknowledge-0] + _ = x[ResultResolve-1] + _ = x[ResultStop-2] +} + +const _Result_name = "ResultAcknowledgeResultResolveResultStop" + +var _Result_index = [...]uint8{0, 17, 30, 40} + +func (i Result) String() string { + if i < 0 || i >= Result(len(_Result_index)-1) { + return "Result(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Result_name[_Result_index[i]:_Result_index[i+1]] +} diff --git a/notification/slack/cache.go b/notification/slack/cache.go new file mode 100644 index 0000000000..34c10bc740 --- /dev/null +++ b/notification/slack/cache.go @@ -0,0 +1,43 @@ +package slack + +import ( + "time" + + "github.com/golang/groupcache/lru" +) + +type ttlCache struct { + *lru.Cache + ttl time.Duration +} + +func newTTLCache(maxEntries int, ttl time.Duration) *ttlCache { + return &ttlCache{ + ttl: ttl, + Cache: lru.New(maxEntries), + } +} + +type cacheItem struct { + expires time.Time + value interface{} +} + +func (c *ttlCache) Add(key lru.Key, value interface{}) { + c.Cache.Add(key, cacheItem{ + value: value, + expires: time.Now().Add(c.ttl), + }) +} + +func (c *ttlCache) Get(key lru.Key) (interface{}, bool) { + item, ok := c.Cache.Get(key) + if !ok { + return nil, false + } + cItem := item.(cacheItem) + if time.Until(cItem.expires) > 0 { + return cItem.value, true + } + return nil, false +} diff --git a/notification/slack/channel.go b/notification/slack/channel.go new file mode 100644 index 0000000000..7beec65d51 --- /dev/null +++ b/notification/slack/channel.go @@ -0,0 +1,313 @@ +package slack + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strconv" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/target/goalert/config" + "github.com/target/goalert/notification" + "github.com/target/goalert/permission" + "github.com/target/goalert/validation" + "golang.org/x/net/context/ctxhttp" +) + +type ChannelSender struct { + cfg Config + resp chan *notification.MessageResponse 
+ status chan *notification.MessageStatus + + chanTht *throttle + listTht *throttle + + chanCache *ttlCache + listCache *ttlCache + + listMx sync.Mutex + chanMx sync.Mutex +} + +var _ notification.SendResponder = &ChannelSender{} + +func NewChannelSender(ctx context.Context, cfg Config) (*ChannelSender, error) { + return &ChannelSender{ + cfg: cfg, + resp: make(chan *notification.MessageResponse), + status: make(chan *notification.MessageStatus), + + chanTht: newThrottle(time.Minute / 50), + listTht: newThrottle(time.Minute / 50), + + listCache: newTTLCache(250, time.Minute), + chanCache: newTTLCache(1000, 15*time.Minute), + }, nil +} + +// Channel contains information about a Slack channel. +type Channel struct { + ID string + Name string +} + +type slackError string + +func (err slackError) Error() string { return string(err) } +func (err slackError) ClientError() bool { return true } +func wrapError(errMsg, details string) error { + switch errMsg { + case "missing_scope": + // happens if the ID is for a user + return validation.NewFieldError("ChannelID", "Only channels supported.") + case "channel_not_found": + return validation.NewFieldError("ChannelID", "Invalid Slack channel ID.") + case "invalid_auth", "account_inactive", "token_revoked", "not_authed": + return slackError("User account must be linked.") + } + return errors.Wrap(errors.New(errMsg), details) +} + +// Channel will lookup a single Slack channel for the bot. 
+func (s *ChannelSender) Channel(ctx context.Context, channelID string) (*Channel, error) { + err := permission.LimitCheckAny(ctx, permission.User, permission.System) + if err != nil { + return nil, err + } + + s.chanMx.Lock() + defer s.chanMx.Unlock() + res, ok := s.chanCache.Get(channelID) + if !ok { + ch, err := s.loadChannel(ctx, channelID) + if err != nil { + return nil, err + } + s.chanCache.Add(channelID, ch) + return ch, nil + } + if err != nil { + return nil, err + } + + return res.(*Channel), nil +} + +func (s *ChannelSender) loadChannel(ctx context.Context, channelID string) (*Channel, error) { + cfg := config.FromContext(ctx) + + v := make(url.Values) + // Parameters and URL documented here: + // https://api.slack.com/methods/conversations.info + v.Set("token", cfg.Slack.AccessToken) + v.Set("channel", channelID) + + infoURL := s.cfg.url("/api/conversations.info") + + var resData struct { + OK bool + Error string + Channel struct { + ID string + Name string + } + } + + err := s.chanTht.Wait(ctx) + if err != nil { + return nil, err + } + resp, err := ctxhttp.PostForm(ctx, http.DefaultClient, infoURL, v) + if err != nil { + return nil, err + } + if resp.StatusCode == 429 { + // respect Retry-After (seconds) if possible + sec, err := strconv.Atoi(resp.Header.Get("Retry-After")) + if err == nil { + s.chanTht.SetWaitUntil(time.Now().Add(time.Second * time.Duration(sec))) + // try again + return s.loadChannel(ctx, channelID) + } + } + + if resp.StatusCode != 200 { + resp.Body.Close() + return nil, errors.New("non-200 response from Slack: " + resp.Status) + } + err = json.NewDecoder(resp.Body).Decode(&resData) + resp.Body.Close() + if err != nil { + return nil, errors.Wrap(err, "parse JSON") + } + + if !resData.OK { + return nil, wrapError(resData.Error, "lookup Slack channel") + } + + return &Channel{ + ID: resData.Channel.ID, + Name: "#" + resData.Channel.Name, + }, nil +} + +// ListChannels will return a list of channels visible to the slack bot. 
+func (s *ChannelSender) ListChannels(ctx context.Context) ([]Channel, error) { + err := permission.LimitCheckAny(ctx, permission.User, permission.System) + if err != nil { + return nil, err + } + + cfg := config.FromContext(ctx) + s.listMx.Lock() + defer s.listMx.Unlock() + res, ok := s.listCache.Get(cfg.Slack.AccessToken) + if !ok { + chs, err := s.loadChannels(ctx) + if err != nil { + return nil, err + } + ch2 := make([]Channel, len(chs)) + copy(ch2, chs) + s.listCache.Add(cfg.Slack.AccessToken, ch2) + return chs, nil + } + if err != nil { + return nil, err + } + + chs := res.([]Channel) + cpy := make([]Channel, len(chs)) + copy(cpy, chs) + + return cpy, nil +} + +func (s *ChannelSender) loadChannels(ctx context.Context) ([]Channel, error) { + cfg := config.FromContext(ctx) + v := make(url.Values) + // Parameters and URL documented here: + // https://api.slack.com/methods/users.conversations + v.Set("token", cfg.Slack.AccessToken) + v.Set("exclude_archived", "true") + + // Using `Set` instead of `Add` here. Slack expects a comma-delimited list instead of + // an array-encoded parameter. 
+ v.Set("types", "private_channel,public_channel") + v.Set("limit", "200") + listURL := s.cfg.url("/api/users.conversations") + + n := 0 + var channels []Channel + for { + n++ + if n > 10 { + return nil, errors.New("abort after > 10 pages of Slack channels") + } + + err := s.listTht.Wait(ctx) + if err != nil { + return nil, err + } + resp, err := ctxhttp.PostForm(ctx, http.DefaultClient, listURL, v) + if err != nil { + return nil, err + } + if resp.StatusCode == 429 { + resp.Body.Close() + // respect Retry-After (seconds) if possible + sec, err := strconv.Atoi(resp.Header.Get("Retry-After")) + if err == nil { + s.listTht.SetWaitUntil(time.Now().Add(time.Second * time.Duration(sec))) + // no need to start over, re-fetch current page + continue + } + } + if resp.StatusCode != 200 { + resp.Body.Close() + return nil, errors.New("non-200 response from Slack: " + resp.Status) + } + + var resData struct { + OK bool + Error string + Channels []Channel + Meta struct { + NextCursor string `json:"next_cursor"` + } `json:"response_metadata"` + } + + err = json.NewDecoder(resp.Body).Decode(&resData) + resp.Body.Close() + if err != nil { + return nil, errors.Wrap(err, "parse JSON") + } + + if !resData.OK { + return nil, wrapError(resData.Error, "list Slack channels") + } + + channels = append(channels, resData.Channels...) 
+ + if resData.Meta.NextCursor == "" { + break + } + + v.Set("cursor", resData.Meta.NextCursor) + } + + for i := range channels { + channels[i].Name = "#" + channels[i].Name + } + + return channels, nil +} + +func (s *ChannelSender) Send(ctx context.Context, msg notification.Message) (*notification.MessageStatus, error) { + cfg := config.FromContext(ctx) + + vals := make(url.Values) + // Parameters & URL documented here: + // https://api.slack.com/methods/chat.postMessage + vals.Set("channel", msg.Destination().Value) + vals.Set("text", fmt.Sprintf("Alert: %s\n\n<%s>", msg.Body(), cfg.CallbackURL("/alerts/"+strconv.Itoa(msg.SubjectID())))) + vals.Set("token", cfg.Slack.AccessToken) + + resp, err := http.PostForm(s.cfg.url("/api/chat.postMessage"), vals) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, errors.Errorf("non-200 response: %s", resp.Status) + } + + var resData struct { + OK bool + Error string + TS string + } + err = json.NewDecoder(resp.Body).Decode(&resData) + if err != nil { + return nil, errors.Wrap(err, "decode response") + } + if !resData.OK { + return nil, errors.Errorf("Slack error: %s", resData.Error) + } + + return ¬ification.MessageStatus{ + ID: msg.ID(), + ProviderMessageID: resData.TS, + State: notification.MessageStateDelivered, + }, nil +} +func (s *ChannelSender) Status(ctx context.Context, id, providerID string) (*notification.MessageStatus, error) { + return nil, errors.New("not implemented") +} + +func (s *ChannelSender) ListenStatus() <-chan *notification.MessageStatus { return s.status } +func (s *ChannelSender) ListenResponse() <-chan *notification.MessageResponse { return s.resp } diff --git a/notification/slack/config.go b/notification/slack/config.go new file mode 100644 index 0000000000..23f1359e3d --- /dev/null +++ b/notification/slack/config.go @@ -0,0 +1,21 @@ +package slack + +import ( + "strings" +) + +// Config contains values used for the Slack notification 
sender. +type Config struct { + BaseURL string +} + +func (c Config) url(path string) string { + if c.BaseURL != "" { + return strings.TrimSuffix(c.BaseURL, "/") + path + } + if strings.HasPrefix(path, "/api") { + return "https://api.slack.com" + path + } + + return "https://slack.com" + path +} diff --git a/notification/slack/throttle.go b/notification/slack/throttle.go new file mode 100644 index 0000000000..13be11cb6b --- /dev/null +++ b/notification/slack/throttle.go @@ -0,0 +1,50 @@ +package slack + +import ( + "context" + "sync" + "time" +) + +type throttle struct { + tick *time.Ticker + waitUntil time.Time + mx sync.Mutex +} + +func newThrottle(dur time.Duration) *throttle { + return &throttle{ + tick: time.NewTicker(dur), + } +} + +func (t *throttle) Wait(ctx context.Context) error { + t.mx.Lock() + dur := time.Until(t.waitUntil) + t.mx.Unlock() + + if dur > 0 { + tm := time.NewTimer(dur) + select { + case <-ctx.Done(): + return ctx.Err() + case <-tm.C: + } + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.tick.C: + } + + return nil +} + +func (t *throttle) SetWaitUntil(end time.Time) { + t.mx.Lock() + if end.After(t.waitUntil) { + t.waitUntil = end + } + t.mx.Unlock() +} diff --git a/notification/store.go b/notification/store.go new file mode 100644 index 0000000000..d217c1c56a --- /dev/null +++ b/notification/store.go @@ -0,0 +1,306 @@ +package notification + +import ( + "context" + cRand "crypto/rand" + "database/sql" + "encoding/binary" + "fmt" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "math/rand" + "time" + + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" +) + +const minTimeBetweenTests = time.Minute + +type Store interface { + SendContactMethodTest(ctx context.Context, cmID string) error + SendContactMethodVerification(ctx context.Context, cmID string, resend bool) error + 
VerifyContactMethod(ctx context.Context, cmID string, code int) ([]string, error) + CodeExpiration(ctx context.Context, cmID string) (*time.Time, error) + Code(ctx context.Context, id string) (int, error) +} + +var _ Store = &DB{} + +type DB struct { + db *sql.DB + getCMUserID *sql.Stmt + setVerificationCode *sql.Stmt + verifyVerificationCode *sql.Stmt + enableContactMethods *sql.Stmt + insertTestNotification *sql.Stmt + updateLastSendTime *sql.Stmt + codeExpiration *sql.Stmt + getCode *sql.Stmt + sendTestLock *sql.Stmt + + rand *rand.Rand +} + +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + var seed int64 + err := binary.Read(cRand.Reader, binary.BigEndian, &seed) + if err != nil { + return nil, errors.Wrap(err, "generate random seed") + } + + return &DB{ + db: db, + + rand: rand.New(rand.NewSource(seed)), + + getCMUserID: p.P(`select user_id from user_contact_methods where id = $1`), + + sendTestLock: p.P(`lock outgoing_messages, user_contact_methods in row exclusive mode`), + + getCode: p.P(` + select code + from user_verification_codes + where id = $1 + `), + + codeExpiration: p.P(` + select expires_at + from user_verification_codes v + join user_contact_methods cm on cm.id = $1 + where v.user_id = cm.user_id and v.contact_method_value = cm.value + `), + + setVerificationCode: p.P(` + insert into user_verification_codes (id, user_id, contact_method_value, code, expires_at, send_to) + select + $1, + cm.user_id, + cm.value, + $3, + now() + cast($4 as interval), + $2 + from user_contact_methods cm + where id = $2 + on conflict (user_id, contact_method_value) do update + set + send_to = $2, + expires_at = case when $5 then user_verification_codes.expires_at else EXCLUDED.expires_at end, + code = case when $5 then user_verification_codes.code else EXCLUDED.code end + `), + verifyVerificationCode: p.P(` + delete from user_verification_codes v + using user_contact_methods cm + where + cm.id = $1 and + 
v.contact_method_value = cm.value and + v.user_id = cm.user_id and + v.code = $2 + returning cm.value + `), + + enableContactMethods: p.P(` + update user_contact_methods + set disabled = false + where user_id = $1 and value = $2 + returning id + `), + + updateLastSendTime: p.P(` + update user_contact_methods + set last_test_verify_at = now() + where + id = $1 and + ( + last_test_verify_at + cast($2 as interval) < now() + or + last_test_verify_at isnull + ) + `), + + insertTestNotification: p.P(` + insert into outgoing_messages (id, message_type, contact_method_id, user_id) + select + $1, + 'test_notification', + $2, + cm.user_id + from user_contact_methods cm + where cm.id = $2 + `), + }, p.Err +} + +func (db *DB) cmUserID(ctx context.Context, id string) (string, error) { + err := permission.LimitCheckAny(ctx, permission.User, permission.Admin) + if err != nil { + return "", err + } + + err = validate.UUID("ContactMethodID", id) + if err != nil { + return "", err + } + + var userID string + err = db.getCMUserID.QueryRowContext(ctx, id).Scan(&userID) + if err != nil { + return "", err + } + + // only admin or same-user can verify + err = permission.LimitCheckAny(ctx, permission.Admin, permission.MatchUser(userID)) + if err != nil { + return "", err + } + + return userID, nil +} + +func (db *DB) Code(ctx context.Context, id string) (int, error) { + err := permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return 0, err + } + err = validate.UUID("VerificationCodeID", id) + if err != nil { + return 0, err + } + + var code int + err = db.getCode.QueryRowContext(ctx, id).Scan(&code) + return code, err +} + +func (db *DB) CodeExpiration(ctx context.Context, id string) (t *time.Time, err error) { + _, err = db.cmUserID(ctx, id) + if err != nil { + return nil, err + } + + err = db.codeExpiration.QueryRowContext(ctx, id).Scan(&t) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + + return t, nil +} + +func (db *DB) 
SendContactMethodTest(ctx context.Context, id string) error { + _, err := db.cmUserID(ctx, id) + if err != nil { + return err + } + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + // Lock outgoing_messages first, before we modify user_contact methods + // to prevent deadlock. + _, err = tx.Stmt(db.sendTestLock).ExecContext(ctx) + if err != nil { + return err + } + + r, err := tx.Stmt(db.updateLastSendTime).ExecContext(ctx, id, fmt.Sprintf("%f seconds", minTimeBetweenTests.Seconds())) + if err != nil { + return err + } + rows, err := r.RowsAffected() + if err != nil { + return err + } + if rows != 1 { + return validation.NewFieldError("ContactMethod", "test message rate-limit exceeded") + } + + vID := uuid.NewV4().String() + _, err = tx.Stmt(db.insertTestNotification).ExecContext(ctx, vID, id) + if err != nil { + return err + } + + return tx.Commit() +} + +func (db *DB) SendContactMethodVerification(ctx context.Context, id string, resend bool) error { + _, err := db.cmUserID(ctx, id) + if err != nil { + return err + } + + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer tx.Rollback() + + r, err := tx.Stmt(db.updateLastSendTime).ExecContext(ctx, id, fmt.Sprintf("%f seconds", minTimeBetweenTests.Seconds())) + if err != nil { + return err + } + rows, err := r.RowsAffected() + if err != nil { + return err + } + if rows != 1 { + return validation.NewFieldError("ContactMethod", "test message rate-limit exceeded") + } + + vID := uuid.NewV4().String() + code := db.rand.Intn(900000) + 100000 + _, err = tx.Stmt(db.setVerificationCode).ExecContext(ctx, vID, id, code, fmt.Sprintf("%f seconds", (15*time.Minute).Seconds()), resend) + if err != nil { + return errors.Wrap(err, "set verification code") + } + + return tx.Commit() +} + +func (db *DB) VerifyContactMethod(ctx context.Context, cmID string, code int) ([]string, error) { + userID, err := db.cmUserID(ctx, cmID) + if err != nil { + return nil, err + 
} + + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + defer tx.Rollback() + + var cmValue string + err = db.verifyVerificationCode.QueryRowContext(ctx, cmID, code).Scan(&cmValue) + if err == sql.ErrNoRows { + return nil, validation.NewFieldError("Code", "unrecognized code") + } + if err != nil { + return nil, err + } + + rows, err := db.enableContactMethods.QueryContext(ctx, userID, cmValue) + if err != nil { + return nil, err + } + defer rows.Close() + var result []string + + for rows.Next() { + var id string + err = rows.Scan(&id) + if err != nil { + return nil, err + } + result = append(result, id) + } + return result, tx.Commit() +} diff --git a/notification/stubsender.go b/notification/stubsender.go new file mode 100644 index 0000000000..2648dd08bf --- /dev/null +++ b/notification/stubsender.go @@ -0,0 +1,26 @@ +package notification + +import ( + "context" + + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" +) + +type stubSender struct{} + +var _ SendResponder = stubSender{} + +func (stubSender) Send(ctx context.Context, msg Message) (*MessageStatus, error) { + return &MessageStatus{ + Ctx: ctx, + ProviderMessageID: "stub_" + uuid.NewV4().String(), + ID: msg.ID(), + State: MessageStateDelivered, + }, nil +} +func (stubSender) Status(ctx context.Context, id, providerID string) (*MessageStatus, error) { + return nil, errors.New("not implemented") +} +func (stubSender) ListenStatus() <-chan *MessageStatus { return make(chan *MessageStatus) } +func (stubSender) ListenResponse() <-chan *MessageResponse { return make(chan *MessageResponse) } diff --git a/notification/testnotification.go b/notification/testnotification.go new file mode 100644 index 0000000000..d7d22afa71 --- /dev/null +++ b/notification/testnotification.go @@ -0,0 +1,16 @@ +package notification + +// Test represents outgoing test notification. 
+type Test struct { + Dest Dest + CallbackID string // CallbackID is the identifier used to communicate a response to the notification +} + +var _ Message = &Test{} + +func (t Test) Type() MessageType { return MessageTypeTest } +func (t Test) ID() string { return t.CallbackID } +func (t Test) Destination() Dest { return t.Dest } +func (t Test) Body() string { return "" } +func (t Test) ExtendedBody() string { return "" } +func (t Test) SubjectID() int { return -1 } diff --git a/notification/twilio/alertsms.go b/notification/twilio/alertsms.go new file mode 100644 index 0000000000..9ead05ca15 --- /dev/null +++ b/notification/twilio/alertsms.go @@ -0,0 +1,122 @@ +package twilio + +import ( + "bytes" + "strings" + "text/template" + "unicode" + + "github.com/pkg/errors" +) + +// 160 GSM characters (140 bytes) is the max for a single segment message. +// Multi-segment messages include a 6-byte header limiting to 153 GSM characters +// per segment. +// +// Non-GSM will use UCS-2 encoding, using 2-bytes per character. The max would +// then be 70 or 67 characters for single or multi-segmented messages, respectively. +const maxGSMLen = 160 + +type alertSMS struct { + ID int + Body string + Link string + Code int +} + +var smsTmpl = template.Must(template.New("alertSMS").Parse( + `Alert #{{.ID}}: {{.Body}} +{{- if .Link }} + +{{.Link}} +{{- end}} +{{- if .Code}} + +Reply '{{.Code}}a' to ack, '{{.Code}}c' to close. +{{- end}}`, +)) + +const gsmAlphabet = "@∆ 0¡P¿p£!1AQaq$Φ\"2BRbr¥Γ#3CScsèΛ¤4DTdtéΩ%5EUeuùΠ&6FVfvìΨ'7GWgwòΣ(8HXhxÇΘ)9IYiy\n Ξ *:JZjzØ+;KÄkäøÆ,NÜnüåÉ/?O§oà" + +var gsmChr = make(map[rune]bool, len(gsmAlphabet)) + +func init() { + for _, r := range gsmAlphabet { + gsmChr[r] = true + } +} + +func mapGSM(r rune) rune { + if unicode.IsSpace(r) { + return ' ' + } + + if !unicode.IsPrint(r) { + return -1 + } + + if gsmChr[r] { + return r + } + + // Map similar characters to keep as much meaning as possible. 
+ switch r { + case '_', '|', '~': + return '-' + case '[', '{': + return '(' + case ']', '}': + return ')' + case '»': + return '>' + case '`', '’', '‘': + return '\'' + } + + switch { + case unicode.Is(unicode.Dash, r): + return '-' + case unicode.Is(unicode.Quotation_Mark, r): + return '"' + } + + // If no substitute, replace with '?' + return '?' +} + +// hasTwoWaySMSSupport returns true if a number supports 2-way SMS messaging (replies). +func hasTwoWaySMSSupport(number string) bool { + // India numbers do not support SMS replies. + return !strings.HasPrefix(number, "+91") +} + +// Render will render a single-segment SMS. +// +// Non-GSM characters will be replaced with '?' and Body will be +// truncated (if needed) until the output is <= 160 characters. +func (a alertSMS) Render() (string, error) { + a.Body = strings.Map(mapGSM, a.Body) + a.Body = strings.Replace(a.Body, " ", " ", -1) + a.Body = strings.TrimSpace(a.Body) + + var buf bytes.Buffer + err := smsTmpl.Execute(&buf, a) + if err != nil { + return "", err + } + + if buf.Len() > maxGSMLen { + newBodyLen := len(a.Body) - (buf.Len() - maxGSMLen) + if newBodyLen <= 0 { + return "", errors.New("message too long to include body") + } + a.Body = strings.TrimSpace(a.Body[:newBodyLen]) + buf.Reset() + err = smsTmpl.Execute(&buf, a) + if err != nil { + return "", err + } + } + + return buf.String(), nil +} diff --git a/notification/twilio/alertsms_test.go b/notification/twilio/alertsms_test.go new file mode 100644 index 0000000000..296f42ebe2 --- /dev/null +++ b/notification/twilio/alertsms_test.go @@ -0,0 +1,131 @@ +package twilio + +import ( + "strconv" + "strings" + "testing" +) + +func TestMapGSM(t *testing.T) { + check := func(input, exp string) { + t.Run("", func(t *testing.T) { + res := strings.Map(mapGSM, input) + if res != exp { + t.Errorf("got %s; want %s", strconv.Quote(res), strconv.Quote(exp)) + } + }) + } + + check("foo\\bar", "foo?bar") + check("foo\bar", "fooar") + check("foo\nbar", "foo bar") + 
check("foo\t bar@/ok:asdf", "foo bar@/ok:asdf") + check("[Testing] {alert_message: `okay`}", "(Testing) (alert-message: 'okay')") +} + +func TestAlertSMS_Render(t *testing.T) { + check := func(name string, a alertSMS, exp string) { + t.Run(name, func(t *testing.T) { + res, err := a.Render() + if len(res) > 160 { + t.Errorf("message exceeded 160 characters") + } else { + t.Log("Length", len(res)) + } + if err != nil && exp != "" { + t.Fatalf("got err %v; want nil", err) + } else if err == nil && exp == "" { + t.Log(res) + t.Fatal("got nil; want err") + } + + if res != exp { + t.Errorf("got %s; want %s", strconv.Quote(res), strconv.Quote(exp)) + } + }) + } + + check("normal", + alertSMS{ + ID: 123, + Code: 1, + Link: "https://example.com/alerts/123", + Body: "Testing", + }, + `Alert #123: Testing + +https://example.com/alerts/123 + +Reply '1a' to ack, '1c' to close.`, + ) + + check("no-reply", + alertSMS{ + ID: 123, + Link: "https://example.com/alerts/123", + Body: "Testing", + }, + `Alert #123: Testing + +https://example.com/alerts/123`, + ) + + check("no-Link", + alertSMS{ + ID: 123, + Code: 1, + Body: "Testing", + }, + `Alert #123: Testing + +Reply '1a' to ack, '1c' to close.`, + ) + + check("no-reply-Link", + alertSMS{ + ID: 123, + Body: "Testing", + }, + `Alert #123: Testing`, + ) + + check("truncate", + alertSMS{ + ID: 123, + Code: 1, + Link: "https://example.com/alerts/123", + Body: "Testing with a really really obnoxiously long message that will be need to be truncated at some point.", + }, + `Alert #123: Testing with a really really obnoxiously long message that will be need to be tru + +https://example.com/alerts/123 + +Reply '1a' to ack, '1c' to close.`, + ) + + check("truncate-long-id", + alertSMS{ + ID: 123456789, + Code: 1, + Link: "https://example.com/alerts/123", + Body: "Testing with a really really obnoxiously long message that will be need to be truncated at some point.", + }, + `Alert #123456789: Testing with a really really obnoxiously long 
message that will be need to + +https://example.com/alerts/123 + +Reply '1a' to ack, '1c' to close.`, + ) + + check("message-too-long", + // can't fit body + alertSMS{ + ID: 123456789, + Code: 123456789, + Link: "https://example.com/alerts/123ff/123ff/123ff/123ff/123ff/123ff/123ff/123ff/123ff/123ff/123ff", + Body: "Testing with a really really obnoxiously long message that will be need to be truncated at some point.", + }, + "", + ) + +} diff --git a/notification/twilio/call.go b/notification/twilio/call.go new file mode 100644 index 0000000000..3a2fdb6de8 --- /dev/null +++ b/notification/twilio/call.go @@ -0,0 +1,90 @@ +package twilio + +import ( + "fmt" + "github.com/target/goalert/notification" + "time" +) + +// CallStatus indicates the state of a voice call. +// +// https://www.twilio.com/docs/api/twiml/twilio_request#request-parameters-call-status +type CallStatus string + +// Defined status values for voice calls. +const ( + CallStatusUnknown = CallStatus("") + CallStatusInitiated = CallStatus("initiated") + CallStatusQueued = CallStatus("queued") + CallStatusRinging = CallStatus("ringing") + CallStatusInProgress = CallStatus("in-progress") + CallStatusCompleted = CallStatus("completed") + CallStatusBusy = CallStatus("busy") + CallStatusFailed = CallStatus("failed") + CallStatusNoAnswer = CallStatus("no-answer") + CallStatusCanceled = CallStatus("canceled") +) + +// Scan implements the sql.Scanner interface. +func (s *CallStatus) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *s = CallStatus(t) + case string: + *s = CallStatus(t) + case nil: + *s = CallStatusUnknown + default: + return fmt.Errorf("could not process unknown type for Status(%T)", t) + } + return nil +} + +// CallErrorCode is an error code encountered when making a call. +type CallErrorCode int + +// Call represents a Twilio voice call. 
+type Call struct { + SID string + To string + From string + Status CallStatus + SequenceNumber *int + Direction string + CallDuration time.Duration + ErrorMessage *string + ErrorCode *CallErrorCode +} + +func (call *Call) messageStatus(id string) *notification.MessageStatus { + if call == nil { + return nil + } + + status := ¬ification.MessageStatus{ + ID: id, + ProviderMessageID: call.SID, + } + if call.ErrorMessage != nil && call.ErrorCode != nil { + status.Details = fmt.Sprintf("%s: [%d] %s", call.Status, *call.ErrorCode, *call.ErrorMessage) + } else { + status.Details = string(call.Status) + } + if call.SequenceNumber != nil { + status.Sequence = *call.SequenceNumber + } + + switch call.Status { + case CallStatusCompleted: + status.State = notification.MessageStateDelivered + case CallStatusInitiated, CallStatusQueued: + status.State = notification.MessageStateActive + case CallStatusBusy: + status.State = notification.MessageStateFailedTemp + case CallStatusFailed, CallStatusCanceled, CallStatusNoAnswer: + status.State = notification.MessageStateFailedPerm + default: + status.State = notification.MessageStateSent + } + return status +} diff --git a/notification/twilio/client.go b/notification/twilio/client.go new file mode 100644 index 0000000000..e8d3ed200f --- /dev/null +++ b/notification/twilio/client.go @@ -0,0 +1,310 @@ +package twilio + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/target/goalert/config" +) + +// DefaultTwilioAPIURL is the value that will be used if Config.APIURL is empty. +const DefaultTwilioAPIURL = "https://api.twilio.com/2010-04-01" + +// SMSOptions allows configuring outgoing SMS messages. +type SMSOptions struct { + // ValidityPeriod controls how long a message will still be valid in Twilio's queue. 
// urlJoin appends path parts to base, percent-escaping each part and
// normalizing slashes so exactly one separator appears between segments.
func urlJoin(base string, parts ...string) string {
	escaped := make([]string, len(parts))
	for i, part := range parts {
		escaped[i] = url.PathEscape(strings.Trim(part, "/"))
	}
	return strings.TrimSuffix(base, "/") + "/" + strings.Join(escaped, "/")
}
+} +func (c *Config) httpClient() *http.Client { + if c.Client != nil { + return c.Client + } + + return http.DefaultClient +} +func (c *Config) get(ctx context.Context, urlStr string) (*http.Response, error) { + req, err := http.NewRequest("GET", urlStr, nil) + if err != nil { + return nil, err + } + cfg := config.FromContext(ctx) + req = req.WithContext(ctx) + req.Header.Set("X-Twilio-Signature", string(Signature(cfg.Twilio.AuthToken, urlStr, nil))) + req.SetBasicAuth(cfg.Twilio.AccountSID, cfg.Twilio.AuthToken) + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + + return c.httpClient().Do(req) +} +func (c *Config) post(ctx context.Context, urlStr string, v url.Values) (*http.Response, error) { + req, err := http.NewRequest("POST", urlStr, bytes.NewBufferString(v.Encode())) + if err != nil { + return nil, err + } + cfg := config.FromContext(ctx) + req = req.WithContext(ctx) + req.Header.Set("X-Twilio-Signature", string(Signature(cfg.Twilio.AuthToken, urlStr, v))) + req.SetBasicAuth(cfg.Twilio.AccountSID, cfg.Twilio.AuthToken) + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + return c.httpClient().Do(req) +} + +// GetSMS will return the current state of a Message from Twilio. 
+func (c *Config) GetSMS(ctx context.Context, sid string) (*Message, error) { + cfg := config.FromContext(ctx) + urlStr := c.url("Accounts", cfg.Twilio.AccountSID, "Messages", sid+".json") + resp, err := c.get(ctx, urlStr) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != 200 { + var e Exception + err = json.Unmarshal(data, &e) + if err != nil { + return nil, errors.Wrap(err, "parse error response") + } + return nil, &e + } + + var m Message + err = json.Unmarshal(data, &m) + if err != nil { + return nil, errors.Wrap(err, "parse message response") + } + + return &m, nil +} + +// GetVoice will return the current state of a voice call from Twilio. +func (c *Config) GetVoice(ctx context.Context, sid string) (*Call, error) { + cfg := config.FromContext(ctx) + urlStr := c.url("Accounts", cfg.Twilio.AccountSID, "Calls", sid+".json") + resp, err := c.post(ctx, urlStr, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != 200 { + var e Exception + err = json.Unmarshal(data, &e) + if err != nil { + return nil, errors.Wrap(err, "parse error response") + } + return nil, &e + } + + var v Call + err = json.Unmarshal(data, &v) + if err != nil { + return nil, errors.Wrap(err, "parse voice call response") + } + return &v, nil +} + +// CallbackURL will return the callback url for the given configuration. +func (voice *VoiceOptions) CallbackURL(cfg config.Config) (string, error) { + if voice == nil { + voice = &VoiceOptions{} + } + if voice.CallType == "" { + return "", errors.New("CallType missing") + } + return cfg.CallbackURL("/api/v2/twilio/call?type="+url.QueryEscape(string(voice.CallType)), voice.CallbackParams, voice.Params), nil +} + +// StatusCallbackURL will return the status callback url for the given configuration. 
+func (voice *VoiceOptions) StatusCallbackURL(cfg config.Config) (string, error) { + if voice == nil { + voice = &VoiceOptions{} + } + return cfg.CallbackURL("/api/v2/twilio/call/status", voice.CallbackParams), nil +} + +// StatusCallbackURL will return the status callback url for the given configuration. +func (sms *SMSOptions) StatusCallbackURL(cfg config.Config) (string, error) { + if sms == nil { + sms = &SMSOptions{} + } + return cfg.CallbackURL("/api/v2/twilio/message/status", sms.CallbackParams), nil +} + +// StartVoice will initiate a voice call to the given number. +func (c *Config) StartVoice(ctx context.Context, to string, o *VoiceOptions) (*Call, error) { + cfg := config.FromContext(ctx) + v := make(url.Values) + v.Set("To", to) + v.Set("From", cfg.Twilio.FromNumber) + stat, err := o.StatusCallbackURL(cfg) + if err != nil { + return nil, errors.Wrap(err, "build status callback URL") + } + v.Set("StatusCallback", stat) + + voiceCallbackURL, err := o.CallbackURL(cfg) + if err != nil { + return nil, errors.Wrap(err, "build voice callback URL") + } + v.Set("Url", voiceCallbackURL) + v.Set("FallbackUrl", voiceCallbackURL) + v.Add("StatusCallbackEvent", "initiated") + v.Add("StatusCallbackEvent", "ringing") + v.Add("StatusCallbackEvent", "answered") + v.Add("StatusCallbackEvent", "completed") + o.apply(v) + urlStr := c.url("Accounts", cfg.Twilio.AccountSID, "Calls.json") + + resp, err := c.post(ctx, urlStr, v) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if resp.StatusCode != 201 { + var e Exception + err = json.Unmarshal(data, &e) + if err != nil { + return nil, errors.Wrap(err, "parse error response") + } + return nil, &e + } + + var call Call + err = json.Unmarshal(data, &call) + if err != nil { + return nil, errors.Wrap(err, "parse voice call response") + } + if call.ErrorMessage != nil && call.ErrorCode != nil { + return &call, &Exception{ + Status: 
resp.StatusCode, + Message: *call.ErrorMessage, + Code: int(*call.ErrorCode), + } + } + return &call, nil +} + +// SendSMS will send an SMS using Twilio. +func (c *Config) SendSMS(ctx context.Context, to, body string, o *SMSOptions) (*Message, error) { + cfg := config.FromContext(ctx) + v := make(url.Values) + v.Set("To", to) + v.Set("From", cfg.Twilio.FromNumber) + v.Set("Body", body) + + stat, err := o.StatusCallbackURL(cfg) + if err != nil { + return nil, errors.Wrap(err, "build status callback URL") + } + v.Set("StatusCallback", stat) + o.apply(v) + urlStr := c.url("Accounts", cfg.Twilio.AccountSID, "Messages.json") + + resp, err := c.post(ctx, urlStr, v) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if resp.StatusCode != 201 { + var e Exception + err = json.Unmarshal(data, &e) + if err != nil { + return nil, errors.Wrap(err, "parse error response") + } + return nil, &e + } + + var m Message + err = json.Unmarshal(data, &m) + if err != nil { + return nil, errors.Wrap(err, "parse message response") + } + if m.ErrorCode != nil && m.ErrorMessage != nil { + return &m, &Exception{ + Status: resp.StatusCode, + Message: *m.ErrorMessage, + Code: int(*m.ErrorCode), + } + } + + return &m, nil +} diff --git a/notification/twilio/config.go b/notification/twilio/config.go new file mode 100644 index 0000000000..bc8bbc97d9 --- /dev/null +++ b/notification/twilio/config.go @@ -0,0 +1,21 @@ +package twilio + +import ( + "net/http" +) + +const ( + msgParamID = "msgID" + msgParamSubID = "msgSubjectID" + msgParamBody = "msgBody" +) + +// Config contains the details needed to interact with Twilio for SMS +type Config struct { + + // APIURL can be used to override the Twilio API URL + APIURL string + + // Client is an optional net/http client to use, if nil the global default is used. 
+ Client *http.Client +} diff --git a/notification/twilio/dbban.go b/notification/twilio/dbban.go new file mode 100644 index 0000000000..7c9e394cb0 --- /dev/null +++ b/notification/twilio/dbban.go @@ -0,0 +1,66 @@ +package twilio + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/lib/pq" + "github.com/target/goalert/util" +) + +const ( + banErrorCount = 20 + banDuration = 4 * time.Hour +) + +type dbBan struct { + db *sql.DB + insert *sql.Stmt + isBanned *sql.Stmt + c *Config +} + +func newBanDB(ctx context.Context, db *sql.DB, c *Config, tableName string) (*dbBan, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + // will register these sql statements by Prepared statements + return &dbBan{ + db: db, + c: c, + insert: p.P(fmt.Sprintf(` + INSERT INTO %s ( + phone_number, + outgoing, + error_message + ) + VALUES + ($1, $2, $3) + `, pq.QuoteIdentifier(tableName))), + isBanned: p.P(fmt.Sprintf(` + select count(1) + from %s + where outgoing = $1 + and phone_number = $2 + and occurred_at between + (now() - '%f minutes'::interval) and now() + `, + pq.QuoteIdentifier(tableName), + banDuration.Minutes(), + )), + }, p.Err +} +func (db *dbBan) IsBanned(ctx context.Context, number string, outgoing bool) (bool, error) { + row := db.isBanned.QueryRowContext(ctx, outgoing, number) + var count int + err := row.Scan(&count) + if err != nil && err != sql.ErrNoRows { + return false, err + } + return count >= banErrorCount, nil +} +func (db *dbBan) RecordError(ctx context.Context, number string, outgoing bool, message string) error { + _, err := db.insert.ExecContext(ctx, number, outgoing, message) + return err +} diff --git a/notification/twilio/dbsms.go b/notification/twilio/dbsms.go new file mode 100644 index 0000000000..c7541dba7f --- /dev/null +++ b/notification/twilio/dbsms.go @@ -0,0 +1,136 @@ +package twilio + +import ( + "context" + "database/sql" + "github.com/target/goalert/util" +) + +type dbSMS struct { + db *sql.DB + + lock *sql.Stmt + 
insert *sql.Stmt + lookupByCode *sql.Stmt + lookupLatest *sql.Stmt + existingCode *sql.Stmt + + lookupByAlert *sql.Stmt + + getInUse *sql.Stmt +} + +func newDB(ctx context.Context, db *sql.DB) (*dbSMS, error) { + prep := &util.Prepare{DB: db, Ctx: ctx} + p := prep.P + + // will register these sql statements by Prepared statements + return &dbSMS{ + db: db, + + lock: p(`LOCK twilio_sms_callbacks IN SHARE UPDATE EXCLUSIVE MODE`), + + getInUse: p(` + SELECT cb.code + FROM twilio_sms_callbacks cb + JOIN alerts a ON a.id = cb.alert_id AND a.status != 'closed' + WHERE phone_number = $1 + `), + + existingCode: p(` + SELECT cb.code + FROM twilio_sms_callbacks cb + JOIN alerts a ON a.id = cb.alert_id AND a.status != 'closed' + WHERE phone_number = $1 AND cb.alert_id = $2 + `), + + insert: p(` + INSERT INTO twilio_sms_callbacks (phone_number, callback_id, code, alert_id) + VALUES ($1, $2, $3, $4) + ON CONFLICT (phone_number, code) DO UPDATE + SET + callback_id = $2, + alert_id = $4, + sent_at = now() + `), + + lookupByCode: p(`SELECT callback_id, alert_id FROM twilio_sms_callbacks WHERE phone_number = $1 AND code = $2`), + lookupByAlert: p(`SELECT callback_id FROM twilio_sms_callbacks WHERE phone_number = $1 AND alert_id = $2`), + + lookupLatest: p(` + SELECT callback_id, alert_id + FROM twilio_sms_callbacks + WHERE phone_number = $1 + ORDER BY sent_at DESC + LIMIT 1 + `), + }, prep.Err +} + +func (db *dbSMS) insertDB(ctx context.Context, phoneNumber string, callbackID string, alertID int) (int, error) { + tx, err := db.db.BeginTx(ctx, nil) + if err != nil { + return 0, err + } + defer tx.Rollback() + _, err = tx.StmtContext(ctx, db.lock).ExecContext(ctx) + if err != nil { + return 0, err + } + + var existingCode sql.NullInt64 + err = tx.StmtContext(ctx, db.existingCode).QueryRowContext(ctx, phoneNumber, alertID).Scan(&existingCode) + if err == sql.ErrNoRows { + err = nil + } + if err != nil { + return 0, err + } + if existingCode.Valid { + return int(existingCode.Int64), 
nil + } + + rows, err := tx.StmtContext(ctx, db.getInUse).QueryContext(ctx, phoneNumber) + if err != nil { + return 0, err + } + defer rows.Close() + m := make(map[int]struct{}) + for rows.Next() { + var code int + err = rows.Scan(&code) + if err != nil { + return 0, err + } + m[code] = struct{}{} + } + code := 1 + for { + if _, ok := m[code]; !ok { + break + } + code++ + } + + _, err = tx.StmtContext(ctx, db.insert).ExecContext(ctx, phoneNumber, callbackID, code, alertID) + if err != nil { + return 0, err + } + + return code, tx.Commit() +} + +func (db *dbSMS) LookupByCode(ctx context.Context, phoneNumber string, code int) (callbackID string, alertID int, err error) { + var row *sql.Row + if code != 0 { + row = db.lookupByCode.QueryRowContext(ctx, phoneNumber, code) + } else { + row = db.lookupLatest.QueryRowContext(ctx, phoneNumber) + } + err = row.Scan(&callbackID, &alertID) + return callbackID, alertID, err +} +func (db *dbSMS) LookupByAlertID(ctx context.Context, phoneNumber string, searchID int) (callbackID string, alertID int, err error) { + err = db.lookupByAlert.QueryRowContext(ctx, phoneNumber, searchID).Scan(&callbackID) + return callbackID, searchID, err +} diff --git a/notification/twilio/exception.go b/notification/twilio/exception.go new file mode 100644 index 0000000000..9f77cbf522 --- /dev/null +++ b/notification/twilio/exception.go @@ -0,0 +1,15 @@ +package twilio + +import "fmt" + +// Exception contains information on a Twilio error. 
+type Exception struct { + Status int + Message string + Code int + MoreInfo string `json:"more_info"` +} + +func (e *Exception) Error() string { + return fmt.Sprintf("%d: %s", e.Code, e.Message) +} diff --git a/notification/twilio/headerhack.go b/notification/twilio/headerhack.go new file mode 100644 index 0000000000..c07c191d10 --- /dev/null +++ b/notification/twilio/headerhack.go @@ -0,0 +1,44 @@ +package twilio + +import ( + "io" + "net/http" + + "github.com/felixge/httpsnoop" +) + +// WrapHeaderHack wraps an http.Handler so that a 204 is returned if the body is empty. +// +// A Go 1.10 change removed the implicit header for responses with no content. Unfortunately +// Twilio logs empty responses (with no `Content-Type`) as 502s. +func WrapHeaderHack(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var wrote bool + ww := httpsnoop.Wrap(w, httpsnoop.Hooks{ + Write: func(next httpsnoop.WriteFunc) httpsnoop.WriteFunc { + wrote = true + return next + }, + WriteHeader: func(next httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + wrote = true + return next + }, + ReadFrom: func(next httpsnoop.ReadFromFunc) httpsnoop.ReadFromFunc { + wrote = true + return func(src io.Reader) (int64, error) { + n, err := next(src) + if n > 0 { + wrote = true + } + return n, err + } + }, + }) + + h.ServeHTTP(ww, req) + + if !wrote { + w.WriteHeader(204) + } + }) +} diff --git a/notification/twilio/message.go b/notification/twilio/message.go new file mode 100644 index 0000000000..f57ec76d41 --- /dev/null +++ b/notification/twilio/message.go @@ -0,0 +1,101 @@ +package twilio + +import ( + "fmt" + "github.com/target/goalert/notification" +) + +// MessageStatus indicates the state of a message. +// +// https://www.twilio.com/docs/api/messaging/message#message-status-values +type MessageStatus string + +// Defined status values for messages. 
+const ( + MessageStatusUnknown = MessageStatus("") + MessageStatusAccepted = MessageStatus("accepted") + MessageStatusQueued = MessageStatus("queued") + MessageStatusSending = MessageStatus("sending") + MessageStatusSent = MessageStatus("sent") + MessageStatusReceiving = MessageStatus("receiving") + MessageStatusReceived = MessageStatus("received") + MessageStatusDelivered = MessageStatus("delivered") + MessageStatusUndelivered = MessageStatus("undelivered") + MessageStatusFailed = MessageStatus("failed") +) + +// Scan implements the sql.Scanner interface. +func (s *MessageStatus) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *s = MessageStatus(t) + case string: + *s = MessageStatus(t) + case nil: + *s = MessageStatusUnknown + default: + return fmt.Errorf("could not process unknown type for Status(%T)", t) + } + return nil +} + +// A MessageErrorCode is a defined error code for Twilio messages. +// +// https://www.twilio.com/docs/api/messaging/message#delivery-related-errors +type MessageErrorCode int + +// Defined error codes for messages. +const ( + MessageErrorCodeQueueOverflow = MessageErrorCode(30001) + MessageErrorCodeAccountSuspended = MessageErrorCode(30002) + MessageErrorCodeHandsetUnreachable = MessageErrorCode(30003) + MessageErrorCodeMessageBlocked = MessageErrorCode(30004) + MessageErrorCodeHandsetUnknown = MessageErrorCode(30005) + MessageErrorCodeLandlineUnreachable = MessageErrorCode(30006) + MessageErrorCodeCarrierViolation = MessageErrorCode(30007) + MessageErrorCodeUnknown = MessageErrorCode(30008) + MessageErrorCodeMissingSegment = MessageErrorCode(30009) + MessageErrorCodeExceedsMaxPrice = MessageErrorCode(30010) +) + +// Message represents a Twilio message. 
+type Message struct { + SID string + To string + From string + Status MessageStatus + ErrorCode *MessageErrorCode + ErrorMessage *string +} + +func (msg *Message) messageStatus(id string) *notification.MessageStatus { + if msg == nil { + return nil + } + + status := ¬ification.MessageStatus{ + ID: id, + ProviderMessageID: msg.SID, + } + if msg.ErrorMessage != nil && msg.ErrorCode != nil { + status.Details = fmt.Sprintf("%s: [%d] %s", msg.Status, *msg.ErrorCode, *msg.ErrorMessage) + } else { + status.Details = string(msg.Status) + } + switch msg.Status { + case MessageStatusFailed: + if msg.ErrorCode != nil && + (*msg.ErrorCode == 30008 || *msg.ErrorCode == 30001) { + + status.State = notification.MessageStateFailedTemp + } + status.State = notification.MessageStateFailedPerm + case MessageStatusDelivered: + status.State = notification.MessageStateDelivered + case MessageStatusSent, MessageStatusUndelivered: + status.State = notification.MessageStateSent + default: + status.State = notification.MessageStateActive + } + return status +} diff --git a/notification/twilio/signature.go b/notification/twilio/signature.go new file mode 100644 index 0000000000..b0eb39aef3 --- /dev/null +++ b/notification/twilio/signature.go @@ -0,0 +1,38 @@ +package twilio + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "io" + "net/url" + "sort" +) + +// Signature will calculate the raw signature for a request from Twilio. 
+// https://www.twilio.com/docs/api/security#validating-requests +func Signature(authToken, url string, fields url.Values) []byte { + buf := new(bytes.Buffer) + buf.WriteString(url) + + fieldNames := make(sort.StringSlice, 0, len(fields)) + for name := range fields { + fieldNames = append(fieldNames, name) + } + fieldNames.Sort() + + for _, fieldName := range fieldNames { + buf.WriteString(fieldName + fields.Get(fieldName)) + } + + hash := hmac.New(sha1.New, []byte(authToken)) + io.Copy(hash, buf) + + buf.Reset() + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(hash.Sum(nil)) + enc.Close() + + return buf.Bytes() +} diff --git a/notification/twilio/signature_test.go b/notification/twilio/signature_test.go new file mode 100644 index 0000000000..f70e916702 --- /dev/null +++ b/notification/twilio/signature_test.go @@ -0,0 +1,33 @@ +package twilio + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSignature(t *testing.T) { + + // From twilio docs + // https://www.twilio.com/docs/api/security#validating-requests + + const ( + reqURL = "https://mycompany.com/myapp.php?foo=1&bar=2" + authToken = "12345" + + // Twilio's example code seems to be incorrect (includes an extra `=`) + // so this is different than the test example. 
+ expectedSignature = "GvWf1cFY/Q7PnoempGyD5oXAezc=" + ) + + v := make(url.Values) + v.Set("Digits", "1234") + v.Set("To", "+18005551212") + v.Set("From", "+14158675310") + v.Set("Caller", "+14158675310") + v.Set("CallSid", "CA1234567890ABCDE") + + sig := Signature(authToken, reqURL, v) + assert.Equal(t, expectedSignature, string(sig)) +} diff --git a/notification/twilio/sms.go b/notification/twilio/sms.go new file mode 100644 index 0000000000..190980c479 --- /dev/null +++ b/notification/twilio/sms.go @@ -0,0 +1,386 @@ +package twilio + +import ( + "context" + "database/sql" + "fmt" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/target/goalert/alert" + "github.com/target/goalert/config" + "github.com/target/goalert/notification" + "github.com/target/goalert/permission" + "github.com/target/goalert/retry" + "github.com/target/goalert/util/log" + + "github.com/pkg/errors" +) + +var ( + lastReplyRx = regexp.MustCompile(`^'?\s*(c|close|a|ack[a-z]*)\s*'?$`) + shortReplyRx = regexp.MustCompile(`^'?\s*([0-9]+)\s*(c|a)\s*'?$`) + alertReplyRx = regexp.MustCompile(`^'?\s*(c|close|a|ack[a-z]*)\s*#?\s*([0-9]+)\s*'?$`) +) + +// SMS implements a notification.Sender for Twilio SMS. 
+type SMS struct { + b *dbSMS + c *Config + + respCh chan *notification.MessageResponse + statCh chan *notification.MessageStatus + + ban *dbBan +} + +// NewSMS performs operations like validating essential parameters, registering the Twilio client and db +// and adding routes for successful and unsuccessful message delivery to Twilio +func NewSMS(ctx context.Context, db *sql.DB, c *Config) (*SMS, error) { + b, err := newDB(ctx, db) + if err != nil { + return nil, err + } + + s := &SMS{ + b: b, + c: c, + respCh: make(chan *notification.MessageResponse), + statCh: make(chan *notification.MessageStatus, 10), + } + s.ban, err = newBanDB(ctx, db, c, "twilio_sms_errors") + if err != nil { + return nil, errors.Wrap(err, "init Twilio SMS DB") + } + + return s, nil +} + +// Status provides the current status of a message. +func (s *SMS) Status(ctx context.Context, id, providerID string) (*notification.MessageStatus, error) { + msg, err := s.c.GetSMS(ctx, providerID) + if err != nil { + return nil, err + } + return msg.messageStatus(id), nil +} + +// ListenStatus will return a channel that is fed async status updates. +func (s *SMS) ListenStatus() <-chan *notification.MessageStatus { return s.statCh } + +// ListenResponse will return a channel that is fed async message responses. +func (s *SMS) ListenResponse() <-chan *notification.MessageResponse { return s.respCh } + +// Send implements the notification.Sender interface. 
+func (s *SMS) Send(ctx context.Context, msg notification.Message) (*notification.MessageStatus, error) { + cfg := config.FromContext(ctx) + if !cfg.Twilio.Enable { + return nil, errors.New("Twilio provider is disabled") + } + if msg.Destination().Type != notification.DestTypeSMS { + return nil, errors.Errorf("unsupported destination type %s; expected SMS", msg.Destination().Type) + } + destNumber := msg.Destination().Value + if !supportedCountryCode(destNumber) { + return nil, errors.New("unsupported country code") + } + + if destNumber == cfg.Twilio.FromNumber { + return nil, errors.New("refusing to send outgoing SMS to FromNumber") + } + + ctx = log.WithFields(ctx, log.Fields{ + "Phone": destNumber, + "Type": "TwilioSMS", + }) + + b, err := s.ban.IsBanned(ctx, destNumber, true) + if err != nil { + return nil, errors.Wrap(err, "check ban status") + } + if b { + return nil, errors.New("number had too many outgoing errors recently") + } + + var message string + switch msg.Type() { + case notification.MessageTypeAlertStatus: + message, err = alertSMS{ + ID: msg.SubjectID(), + Body: msg.Body(), + }.Render() + case notification.MessageTypeAlert: + var code int + if hasTwoWaySMSSupport(destNumber) { + code, err = s.b.insertDB(ctx, destNumber, msg.ID(), msg.SubjectID()) + if err != nil { + log.Log(ctx, errors.Wrap(err, "insert alert id for SMS callback -- sending 1-way SMS as fallback")) + } + } + + message, err = alertSMS{ + ID: msg.SubjectID(), + Body: msg.Body(), + Code: code, + Link: cfg.CallbackURL("/alerts/" + strconv.Itoa(msg.SubjectID())), + }.Render() + case notification.MessageTypeTest: + message = fmt.Sprintf("This is a test message from GoAlert.") + case notification.MessageTypeVerification: + message = fmt.Sprintf("GoAlert verification code: %d", msg.SubjectID()) + default: + return nil, errors.Errorf("unhandled message type %s", msg.Type().String()) + } + if err != nil { + return nil, errors.Wrap(err, "render message") + } + + opts := &SMSOptions{ + 
ValidityPeriod: time.Second * 10, + CallbackParams: make(url.Values), + } + opts.CallbackParams.Set(msgParamID, msg.ID()) + // Actually send notification to end user & receive Message Status + resp, err := s.c.SendSMS(ctx, destNumber, message, opts) + if err != nil { + return nil, errors.Wrap(err, "send message") + } + + return resp.messageStatus(msg.ID()), nil +} + +func (s *SMS) ServeStatusCallback(w http.ResponseWriter, req *http.Request) { + if disabled(w, req) { + return + } + ctx := req.Context() + status := MessageStatus(req.FormValue("MessageStatus")) + sid := validSID(req.FormValue("MessageSid")) + number := validPhone(req.FormValue("To")) + if status == "" || sid == "" || number == "" { + http.Error(w, "", http.StatusBadRequest) + return + } + + ctx = log.WithFields(ctx, log.Fields{ + "Status": status, + "SID": sid, + "Phone": number, + "Type": "TwilioSMS", + }) + msg := Message{SID: sid, Status: status} + + log.Debugf(ctx, "Got Twilio SMS status callback.") + + s.statCh <- msg.messageStatus(req.URL.Query().Get(msgParamID)) + + if status != MessageStatusFailed { + // ignore other types + return + } + + err := s.ban.RecordError(context.Background(), number, true, "send failed") + if err != nil { + log.Log(ctx, errors.Wrap(err, "record error")) + } + +} + +func (s *SMS) ServeMessage(w http.ResponseWriter, req *http.Request) { + if disabled(w, req) { + return + } + ctx := req.Context() + cfg := config.FromContext(ctx) + from := validPhone(req.FormValue("From")) + if from == "" || from == cfg.Twilio.FromNumber { + http.Error(w, "", http.StatusBadRequest) + return + } + + ctx = log.WithFields(ctx, log.Fields{ + "Number": from, + "Type": "TwilioSMS", + }) + + respond := func(errMsg string, msg string) { + if errMsg != "" { + err := s.ban.RecordError(context.Background(), from, false, errMsg) + if err != nil { + log.Log(ctx, errors.Wrap(err, "record error")) + } + } + _, err := s.c.SendSMS(ctx, from, msg, nil) + if err != nil { + log.Log(ctx, errors.Wrap(err, 
"send response")) + } + // TODO: we should track & queue these + // (maybe the engine should generate responses instead) + } + var banned bool + var err error + err = retry.DoTemporaryError(func(int) error { + banned, err = s.ban.IsBanned(ctx, from, false) + return errors.Wrap(err, "look up ban status") + }, + retry.Log(ctx), + retry.Limit(10), + retry.FibBackoff(time.Second), + ) + if err != nil { + log.Log(ctx, err) + respond("", "System error. Visit the dashboard to manage alerts.") + return + } + if banned { + http.Error(w, "", http.StatusTooManyRequests) + return + } + + body := req.FormValue("Body") + if strings.Contains(strings.ToLower(body), "stop") { + err := retry.DoTemporaryError(func(int) error { + errCh := make(chan error, 1) + s.respCh <- ¬ification.MessageResponse{ + Ctx: ctx, + From: notification.Dest{Type: notification.DestTypeSMS, Value: from}, + Result: notification.ResultStop, + Err: errCh, + } + return errors.Wrap(<-errCh, "process STOP message") + }, + retry.Log(ctx), + retry.Limit(10), + retry.FibBackoff(time.Second), + ) + if err != nil { + log.Log(ctx, err) + } + return + } + + body = strings.TrimSpace(body) + body = strings.ToLower(body) + var lookupFn func() (string, int, error) + var result notification.Result + + if m := lastReplyRx.FindStringSubmatch(body); len(m) == 2 { + if strings.HasPrefix(m[1], "a") { + result = notification.ResultAcknowledge + } else { + result = notification.ResultResolve + } + lookupFn = func() (string, int, error) { return s.b.LookupByCode(ctx, from, 0) } + } else if m := shortReplyRx.FindStringSubmatch(body); len(m) == 3 { + if strings.HasPrefix(m[2], "a") { + result = notification.ResultAcknowledge + } else { + result = notification.ResultResolve + } + code, err := strconv.Atoi(m[1]) + if err != nil { + log.Debug(ctx, errors.Wrap(err, "parse code")) + } else { + ctx = log.WithField(ctx, "Code", code) + lookupFn = func() (string, int, error) { return s.b.LookupByCode(ctx, from, code) } + } + } else if m := 
alertReplyRx.FindStringSubmatch(body); len(m) == 3 { + if strings.HasPrefix(m[1], "a") { + result = notification.ResultAcknowledge + } else { + result = notification.ResultResolve + } + alertID, err := strconv.Atoi(m[2]) + if err != nil { + log.Debug(ctx, errors.Wrap(err, "parse alertID")) + } else { + ctx = log.WithField(ctx, "AlertID", alertID) + lookupFn = func() (string, int, error) { return s.b.LookupByAlertID(ctx, from, alertID) } + } + } + + if lookupFn == nil { + respond("unknown action", "Sorry, but that isn't a request GoAlert understood. Visit the Web UI for more information. To unsubscribe, reply with STOP.") + ctx = log.WithField(ctx, "SMSBody", body) + log.Debug(ctx, errors.Wrap(err, "parse alert action")) + return + } + + var prefix string + if result == notification.ResultAcknowledge { + prefix = "Acknowledged" + } else { + prefix = "Closed" + } + + var nonSystemErr bool + + var alertID int + err = retry.DoTemporaryError(func(int) error { + callbackID, aID, err := lookupFn() + if err != nil { + return errors.Wrap(err, "lookup callbackID") + } + alertID = aID + + errCh := make(chan error, 1) + s.respCh <- ¬ification.MessageResponse{ + Ctx: ctx, + ID: callbackID, + From: notification.Dest{Type: notification.DestTypeSMS, Value: from}, + Result: result, + Err: errCh, + } + return errors.Wrap(<-errCh, "process notification response") + }, + retry.Log(ctx), + retry.Limit(10), + retry.FibBackoff(time.Second), + ) + ctx = log.WithField(ctx, "AlertID", alertID) + + if errors.Cause(err) == sql.ErrNoRows { + respond("unknown callbackID", "Unknown alert code for this number. Visit the dashboard to manage alerts.") + return + } + + msg := "System error. Visit the dashboard to manage alerts." 
+ if alert.IsAlreadyClosed(err) { + nonSystemErr = true + msg = fmt.Sprintf("Alert #%d already closed", alertID) + } else if alert.IsAlreadyAcknowledged(err) { + nonSystemErr = true + msg = fmt.Sprintf("Alert #%d already acknowledged", alertID) + } + + if nonSystemErr { + // alert store returns the special error struct, twilio checks if it's special, and if so, pulls the log entry + if e, ok := errors.Cause(err).(alert.LogEntryFetcher); ok { + err = nil + // we pass a 'sudo' context to give permission + permission.SudoContext(ctx, func(sCtx context.Context) { + entry, err := e.LogEntry(sCtx) + if err != nil { + log.Log(sCtx, errors.Wrap(err, "fetch log entry")) + } else { + msg += "\n\n" + entry.String() + } + }) + } + log.Log(ctx, errors.Wrap(err, "process notification response")) + respond("", msg) + return + } + + if err != nil { + log.Log(ctx, err) + respond("", msg) + return + } + + respond("", fmt.Sprintf("%s alert #%d", prefix, alertID)) +} diff --git a/notification/twilio/validation.go b/notification/twilio/validation.go new file mode 100644 index 0000000000..2f9bf991dd --- /dev/null +++ b/notification/twilio/validation.go @@ -0,0 +1,89 @@ +package twilio + +import ( + "context" + "crypto/hmac" + "github.com/target/goalert/config" + "github.com/target/goalert/util/log" + "net/http" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +type contextKey string + +const twilioAlreadyValidated = contextKey("already-validated") + +func validateRequest(req *http.Request) error { + if req.Method == "POST" { + req.ParseForm() + } + ctx := req.Context() + cfg := config.FromContext(ctx) + + sig := req.Header.Get("X-Twilio-Signature") + if sig == "" { + return errors.New("missing X-Twilio-Signature") + } + + calcSig := Signature(cfg.Twilio.AuthToken, cfg.CallbackURL(req.URL.String()), req.PostForm) + if !hmac.Equal([]byte(sig), calcSig) { + return errors.New("invalid X-Twilio-Signature") + } + + return nil +} + +// WrapValidation will wrap an http.Handler 
to do X-Twilio-Signature checking. +func WrapValidation(h http.Handler, c Config) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + + if val, ok := ctx.Value(twilioAlreadyValidated).(bool); ok && val { + // only validate once + h.ServeHTTP(w, req) + return + } + + err := validateRequest(req) + if err != nil { + log.Log(ctx, err) + http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + return + } + + h.ServeHTTP(w, req.WithContext(context.WithValue(ctx, twilioAlreadyValidated, true))) + }) +} + +var numRx = regexp.MustCompile(`^\+\d{1,15}$`) +var sidRx = regexp.MustCompile(`^(CA|SM)[\da-f]{32}$`) + +func validPhone(n string) string { + if !numRx.MatchString(n) { + return "" + } + + return n +} +func validSID(n string) string { + if len(n) != 34 { + return "" + } + if !sidRx.MatchString(n) { + return "" + } + + return n +} + +// Supported Country Codes +// +1 = USA, +91 = India +func supportedCountryCode(n string) bool { + if strings.HasPrefix(n, "+1") || strings.HasPrefix(n, "+91") { + return true + } + return false +} diff --git a/notification/twilio/voice.go b/notification/twilio/voice.go new file mode 100644 index 0000000000..9b2015b2bc --- /dev/null +++ b/notification/twilio/voice.go @@ -0,0 +1,702 @@ +package twilio + +import ( + "context" + "database/sql" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/target/goalert/alert" + "github.com/target/goalert/config" + "github.com/target/goalert/notification" + "github.com/target/goalert/permission" + "github.com/target/goalert/retry" + "github.com/target/goalert/util/errutil" + "github.com/target/goalert/util/log" +) + +// CallType indicates a supported Twilio voice call type. +type CallType string + +// Supported call types. 
+const ( + CallTypeAlert = CallType("alert") + CallTypeAlertStatus = CallType("alert-status") + CallTypeTest = CallType("test") + CallTypeVerify = CallType("verify") + CallTypeStop = CallType("stop") +) + +// We use url encoding with no padding to try and eliminate +// encoding problems with buggy apps. +var b64enc = base64.URLEncoding.WithPadding(base64.NoPadding) + +// KeyPressed specifies a key pressed from the voice menu options. +type KeyPressed string + +// Possible keys pressed from the Menu mapped to their actions. +const ( + digitAck = "4" + digitClose = "6" + digitStop = "1" + digitGoBack = "1" + digitRepeat = "*" + digitConfirm = "3" + digitOldAck = "8" + digitOldClose = "9" +) + +var pRx = regexp.MustCompile(`\((.*?)\)`) + +// Voice implements a notification.Sender for Twilio voice calls. +type Voice struct { + c *Config + ban *dbBan + + respCh chan *notification.MessageResponse + statCh chan *notification.MessageStatus +} + +type gather struct { + XMLName xml.Name `xml:"Gather,omitempty"` + Action string `xml:"action,attr,omitempty"` + Method string `xml:"method,attr,omitempty"` + NumDigits int `xml:"numDigits,attr,omitempty"` + Say string `xml:"Say,omitempty"` +} + +type twiMLRedirect struct { + XMLName xml.Name `xml:"Response"` + RedirectURL string `xml:"Redirect"` +} + +type twiMLRetry struct { + XMLName xml.Name `xml:"Response"` + Say string `xml:"Say"` + Pause struct { + Seconds int `xml:"length,attr"` + } `xml:"Pause"` + RedirectURL string `xml:"Redirect"` +} + +type twiMLGather struct { + XMLName xml.Name `xml:"Response"` + Gather *gather +} +type twiMLEnd struct { + XMLName xml.Name `xml:"Response"` + Say string `xml:"Say,omitempty"` + Hangup struct{} +} + +var rmParen = regexp.MustCompile(`\s*\(.*?\)`) + +func voiceErrorMessage(ctx context.Context, err error) (string, error) { + if e, ok := errors.Cause(err).(alert.LogEntryFetcher); ok { + // we pass a 'sudo' context to give permission + var msg string + permission.SudoContext(ctx, func(sCtx 
context.Context) { + entry, err := e.LogEntry(sCtx) + if err != nil { + log.Log(sCtx, errors.Wrap(err, "fetch log entry")) + } else { + // Stripping off anything in between parenthesis + msg = "Already " + pRx.ReplaceAllString(entry.String(), "") + } + }) + if msg != "" { + return msg, nil + } + } + // In case we don't get a log entry, respond with generic messages. + if alert.IsAlreadyClosed(err) { + return "Alert is already closed.", nil + } + if alert.IsAlreadyAcknowledged(err) { + return "Alert is already acknowledged.", nil + } + // Error is something else. + return "System error. Please visit the dashboard.", err +} + +// NewVoice will send out the initial Call to Twilio, specifying all details needed for Twilio to make the first call to the end user +// It performs operations like validating essential parameters, registering the Twilio client and db +// and adding routes for successful and unsuccessful call connections to Twilio +func NewVoice(ctx context.Context, db *sql.DB, c *Config) (*Voice, error) { + v := &Voice{ + c: c, + + respCh: make(chan *notification.MessageResponse), + statCh: make(chan *notification.MessageStatus, 10), + } + + var err error + v.ban, err = newBanDB(ctx, db, c, "twilio_voice_errors") + if err != nil { + return nil, errors.Wrap(err, "init voice ban DB") + } + + return v, nil +} + +func (v *Voice) ServeCall(w http.ResponseWriter, req *http.Request) { + if disabled(w, req) { + return + } + switch CallType(req.FormValue("type")) { + case CallTypeAlert: + v.ServeAlert(w, req) + case CallTypeAlertStatus: + v.ServeAlertStatus(w, req) + case CallTypeTest: + v.ServeTest(w, req) + case CallTypeStop: + v.ServeStop(w, req) + case CallTypeVerify: + v.ServeVerify(w, req) + default: + http.NotFound(w, req) + } +} + +// Status provides the current status of a message. 
+func (v *Voice) Status(ctx context.Context, id, providerID string) (*notification.MessageStatus, error) { + call, err := v.c.GetVoice(ctx, providerID) + if err != nil { + return nil, err + } + return call.messageStatus(id), nil +} + +// ListenStatus will return a channel that is fed async status updates. +func (v *Voice) ListenStatus() <-chan *notification.MessageStatus { return v.statCh } + +// ListenResponse will return a channel that is fed async message responses. +func (v *Voice) ListenResponse() <-chan *notification.MessageResponse { return v.respCh } + +// callbackURL returns an absolute URL pointing to the named callback. +// If params is nil, default values from the BaseURL are used. +func (v *Voice) callbackURL(ctx context.Context, params url.Values, typ CallType) string { + cfg := config.FromContext(ctx) + p := make(url.Values) + p.Set("type", string(typ)) + return cfg.CallbackURL("/api/v2/twilio/call", params, p) +} + +func spellNumber(n int) string { + s := strconv.Itoa(n) + + return strings.Join(strings.Split(s, ""), ", ") +} + +// Send implements the notification.Sender interface. 
+func (v *Voice) Send(ctx context.Context, msg notification.Message) (*notification.MessageStatus, error) { + cfg := config.FromContext(ctx) + if !cfg.Twilio.Enable { + return nil, errors.New("Twilio provider is disabled") + } + toNumber := msg.Destination().Value + if !supportedCountryCode(toNumber) { + return nil, errors.New("unsupported country code") + } + + if toNumber == cfg.Twilio.FromNumber { + return nil, errors.New("refusing to make outgoing call to FromNumber") + } + ctx = log.WithFields(ctx, log.Fields{ + "Number": toNumber, + "Type": "TwilioVoice", + }) + b, err := v.ban.IsBanned(ctx, toNumber, true) + if err != nil { + return nil, errors.Wrap(err, "check ban status") + } + if b { + return nil, errors.New("number had too many outgoing errors recently") + } + + var ep CallType + var message string + + switch msg.Type() { + case notification.MessageTypeAlert: + message = msg.Body() + ep = CallTypeAlert + case notification.MessageTypeAlertStatus: + message = rmParen.ReplaceAllString(msg.Body(), "") + ep = CallTypeAlertStatus + case notification.MessageTypeTest: + message = "This is a test message from GoAlert." + ep = CallTypeTest + case notification.MessageTypeVerification: + message = "Your verification code for GoAlert is: " + spellNumber(msg.SubjectID()) + ep = CallTypeVerify + default: + return nil, errors.Errorf("unhandled message type %s", msg.Type().String()) + } + + if message == "" { + message = "No summary provided." + } + + opts := &VoiceOptions{ + ValidityPeriod: time.Second * 10, + CallType: ep, + CallbackParams: make(url.Values), + Params: make(url.Values), + } + opts.CallbackParams.Set(msgParamID, msg.ID()) + opts.Params.Set(msgParamSubID, strconv.Itoa(msg.SubjectID())) + // Encode the body so we don't need to worry about + // buggy apps not escaping url params properly. 
+ opts.Params.Set(msgParamBody, b64enc.EncodeToString([]byte(message))) + + voiceResponse, err := v.c.StartVoice(ctx, toNumber, opts) + if err != nil { + log.Log(ctx, errors.Wrap(err, "call user")) + return nil, err + } + + return voiceResponse.messageStatus(msg.ID()), nil +} + +func disabled(w http.ResponseWriter, req *http.Request) bool { + ctx := req.Context() + cfg := config.FromContext(ctx) + if !cfg.Twilio.Enable { + log.Log(ctx, errors.New("Twilio provider is disabled")) + http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) + return true + } + return false +} + +func (v *Voice) ServeStatusCallback(w http.ResponseWriter, req *http.Request) { + if disabled(w, req) { + return + } + + ctx := req.Context() + status := CallStatus(req.FormValue("CallStatus")) + number := validPhone(req.FormValue("To")) + sid := validSID(req.FormValue("CallSid")) + if status == "" || number == "" || sid == "" { + http.Error(w, "", http.StatusBadRequest) + return + } + + ctx = log.WithFields(ctx, log.Fields{ + "Status": status, + "SID": sid, + "Phone": number, + "Type": "TwilioVoice", + }) + + if status == CallStatusFailed && req.FormValue("SipResponseCode") == "480" { + // treat it as no-answer since callee unreachable instead of failed + status = CallStatusNoAnswer + } + + callState := &Call{ + SID: sid, + Status: status, + To: number, + } + seq, err := strconv.Atoi(req.FormValue("SequenceNumber")) + if err == nil { + callState.SequenceNumber = &seq + } + + v.statCh <- callState.messageStatus(req.URL.Query().Get(msgParamID)) + + // Only update current call we are on, except for failed call + if status != CallStatusFailed { + return + } + + err = v.ban.RecordError(context.Background(), number, true, "send failed") + if err != nil { + log.Log(ctx, errors.Wrap(err, "record error")) + } +} + +type call struct { + Number string + SID string + Digits string + RetryCount int + Outbound bool + Q url.Values + + // Embedded query fields + msgID string + msgSubjectID 
int + msgBody string +} + +func (v *Voice) ServeStop(w http.ResponseWriter, req *http.Request) { + if disabled(w, req) { + return + } + ctx, call, errResp := v.getCall(w, req) + if call == nil { + return + } + + var messagePrefix string + switch call.Digits { + default: + messagePrefix = "I am sorry. I didn't catch that. " + fallthrough + + case "", digitRepeat: + message := fmt.Sprintf("%sTo confirm unenrollment of this number, press %s. To go back to main menu, press %s. To repeat this message, press %s.", messagePrefix, digitConfirm, digitGoBack, digitRepeat) + g := &gather{ + Action: v.callbackURL(ctx, call.Q, CallTypeStop), + Method: "POST", + NumDigits: 1, + Say: message, + } + renderXML(w, req, twiMLGather{ + Gather: g, + }) + + case digitConfirm: + errCh := make(chan error, 1) + v.respCh <- ¬ification.MessageResponse{ + Ctx: ctx, + From: notification.Dest{Type: notification.DestTypeVoice, Value: call.Number}, + Result: notification.ResultStop, + Err: errCh, + } + + if errResp(false, errors.Wrap(<-errCh, "process STOP response"), "") { + return + } + + renderXML(w, req, twiMLEnd{ + Say: "Unenrolled. 
Goodbye.", + }) + case digitGoBack: // Go back to main menu + renderXML(w, req, twiMLRedirect{ + RedirectURL: v.callbackURL(ctx, call.Q, CallType(call.Q.Get("previous"))), + }) + } +} + +type errRespFn func(userErr bool, err error, msg string) bool + +func (v *Voice) getCall(w http.ResponseWriter, req *http.Request) (context.Context, *call, errRespFn) { + ctx := req.Context() + cfg := config.FromContext(ctx) + isOutbound := req.FormValue("Direction") == "outbound-api" + var remoteNumRaw string + if isOutbound { + remoteNumRaw = req.FormValue("To") + } else { + remoteNumRaw = req.FormValue("From") + } + callSID := validSID(req.FormValue("CallSid")) + phoneNumber := validPhone(remoteNumRaw) + digits := req.FormValue("Digits") + + if callSID == "" || phoneNumber == "" || phoneNumber == cfg.Twilio.FromNumber { + http.Error(w, "", http.StatusBadRequest) + return nil, nil, nil + } + + q := req.URL.Query() + + retryCount, _ := strconv.Atoi(q.Get("retry_count")) + q.Del("retry_count") // retry_count will only be set again if we go through the errResp + + if !isOutbound { + return ctx, &call{ + Number: phoneNumber, + SID: callSID, + RetryCount: retryCount, + Digits: digits, + Outbound: isOutbound, + Q: q, + }, nil + } + + msgID := q.Get(msgParamID) + subID, _ := strconv.Atoi(q.Get(msgParamSubID)) + bodyData, _ := b64enc.DecodeString(q.Get(msgParamBody)) + if msgID == "" { + log.Log(ctx, errors.Errorf("parse call: query param %s is empty or invalid", msgParamID)) + } + if subID == 0 { + log.Log(ctx, errors.Errorf("parse call: query param %s is empty or invalid", msgParamSubID)) + } + if len(bodyData) == 0 { + log.Log(ctx, errors.Errorf("parse call: query param %s is empty or invalid", msgParamBody)) + } + + if digits == "" { + digits = q.Get("retry_digits") + } + q.Del("retry_digits") + + ctx = log.WithFields(ctx, log.Fields{ + "SID": callSID, + "Phone": phoneNumber, + "Digits": digits, + "Type": "TwilioVoice", + }) + + errResp := func(userErr bool, err error, msg string) 
bool { + if err == nil { + return false + } + if userErr { + rerr := v.ban.RecordError(context.Background(), phoneNumber, isOutbound, msg) + if rerr != nil { + log.Log(ctx, errors.Wrap(rerr, "record error")) + } + } + + // always log the failure + log.Log(ctx, err) + + if retry.IsTemporaryError(err) && retryCount < 3 { + // schedule a retry + q.Set("retry_count", strconv.Itoa(retryCount+1)) + q.Set("retry_digits", digits) + retry := twiMLRetry{ + Say: "One moment please.", + RedirectURL: v.callbackURL(ctx, q, CallType(q.Get("type"))), + } + retry.Pause.Seconds = 5 + renderXML(w, req, retry) + return true + } + + renderXML(w, req, twiMLEnd{ + Say: "An error has occurred. Please login to the dashboard to manage alerts. Goodbye.", + }) + return true + } + + return ctx, &call{ + Number: phoneNumber, + SID: callSID, + RetryCount: retryCount, + Digits: digits, + Outbound: isOutbound, + Q: q, + + msgID: msgID, + msgSubjectID: subID, + msgBody: string(bodyData), + }, errResp + +} + +func renderXML(w http.ResponseWriter, req *http.Request, v interface{}) { + data, err := xml.Marshal(v) + if errutil.HTTPError(req.Context(), w, err) { + return + } + + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + io.WriteString(w, xml.Header) + w.Write(data) +} + +func (v *Voice) ServeTest(w http.ResponseWriter, req *http.Request) { + if disabled(w, req) { + return + } + ctx, call, _ := v.getCall(w, req) + if call == nil { + return + } + + var messagePrefix string + switch call.Digits { + default: + messagePrefix = "I am sorry. I didn't catch that. " + fallthrough + case "", digitRepeat: + message := fmt.Sprintf("%s%s. 
To repeat this message, press %s.", messagePrefix, call.msgBody, digitRepeat) + g := &gather{ + Action: v.callbackURL(ctx, call.Q, CallTypeTest), + Method: "POST", + NumDigits: 1, + Say: message, + } + renderXML(w, req, twiMLGather{ + Gather: g, + }) + return + } +} +func (v *Voice) ServeVerify(w http.ResponseWriter, req *http.Request) { + if disabled(w, req) { + return + } + ctx, call, _ := v.getCall(w, req) + if call == nil { + return + } + + var messagePrefix string + switch call.Digits { + default: + messagePrefix = "I am sorry. I didn't catch that. " + fallthrough + case "", digitRepeat: + message := fmt.Sprintf("%s%s. To repeat this message, press %s.", messagePrefix, call.msgBody, digitRepeat) + g := &gather{ + Action: v.callbackURL(ctx, call.Q, CallTypeVerify), + Method: "POST", + NumDigits: 1, + Say: message, + } + renderXML(w, req, twiMLGather{ + Gather: g, + }) + return + } +} + +func (v *Voice) ServeAlertStatus(w http.ResponseWriter, req *http.Request) { + if disabled(w, req) { + return + } + ctx, call, _ := v.getCall(w, req) + if call == nil { + return + } + + var messagePrefix string + switch call.Digits { + default: + messagePrefix = "I am sorry. I didn't catch that. " + fallthrough + case "", digitRepeat: + message := fmt.Sprintf( + "%sStatus update for Alert number %d. %s. To repeat this message, press %s. To unenroll from all notifications, press %s. If you are done, you may simply hang up.", + messagePrefix, call.msgSubjectID, call.msgBody, digitRepeat, digitStop) + g := &gather{ + Action: v.callbackURL(ctx, call.Q, CallTypeAlertStatus), + Method: "POST", + NumDigits: 1, + Say: message, + } + renderXML(w, req, twiMLGather{ + Gather: g, + }) + return + case digitStop: + call.Q.Set("previous", string(CallTypeAlertStatus)) + renderXML(w, req, twiMLRedirect{ + RedirectURL: v.callbackURL(ctx, call.Q, CallTypeStop), + }) + return + } +} + +// ServeAlert serves a call for an alert notification. 
+func (v *Voice) ServeAlert(w http.ResponseWriter, req *http.Request) { + if disabled(w, req) { + return + } + ctx, call, errResp := v.getCall(w, req) + if call == nil { + return + } + + var err error + if !call.Outbound { + // v.serveVoiceUI(w, req) + err = v.ban.RecordError(context.Background(), call.Number, false, "incoming calls not supported") + if err != nil { + log.Log(ctx, errors.Wrap(err, "record error")) + } + renderXML(w, req, twiMLEnd{ + Say: "Please login to the dashboard to manage alerts. Goodbye.", + }) + return + } + + // See Twilio Request Parameter documentation at + // https://www.twilio.com/docs/api/twiml/twilio_request#synchronous + var messagePrefix string + + switch call.Digits { + default: + if call.Digits == digitOldAck { + messagePrefix = fmt.Sprintf("The menu options have changed. To acknowledge, press %s. ", digitAck) + } else if call.Digits == digitOldClose { + messagePrefix = fmt.Sprintf("The menu options have changed. To close, press %s. ", digitClose) + } else { + messagePrefix = "I am sorry. I didn't catch that. " + } + fallthrough + case "", digitRepeat: + message := fmt.Sprintf( + "%sMessage from Go Alert. %s. To acknowledge, press %s. To close, press %s. To unenroll from all notifications, press %s. To repeat this message, press %s", + messagePrefix, call.msgBody, digitAck, digitClose, digitStop, digitRepeat) + // User wants Twilio to repeat the message + g := &gather{ + Action: v.callbackURL(ctx, call.Q, CallTypeAlert), + Method: "POST", + NumDigits: 1, + Say: message, + } + renderXML(w, req, twiMLGather{ + Gather: g, + }) + return + + case digitStop: + call.Q.Set("previous", string(CallTypeAlert)) + renderXML(w, req, twiMLRedirect{ + RedirectURL: v.callbackURL(ctx, call.Q, CallTypeStop), + }) + return + + case digitAck, digitClose: // Acknowledge and Close cases + var result notification.Result + var msg string + if call.Digits == digitClose { + result = notification.ResultResolve + msg = "Closed. Goodbye." 
+ } else { + result = notification.ResultAcknowledge + msg = "Acknowledged. Goodbye." + } + errCh := make(chan error, 1) + v.respCh <- ¬ification.MessageResponse{ + Ctx: ctx, + ID: call.msgID, + From: notification.Dest{Type: notification.DestTypeVoice, Value: call.Number}, + Result: result, + Err: errCh, + } + err = <-errCh + if err != nil { + msg, err = voiceErrorMessage(ctx, err) + } + if errResp(false, errors.Wrap(err, "process response"), "Failed to process notification response.") { + return + } + + renderXML(w, req, twiMLEnd{ + Say: msg, + }) + return + } +} diff --git a/notification/verification.go b/notification/verification.go new file mode 100644 index 0000000000..c951f1da5c --- /dev/null +++ b/notification/verification.go @@ -0,0 +1,17 @@ +package notification + +// Verification represents outgoing verification code. +type Verification struct { + Dest Dest + CallbackID string // CallbackID is the identifier used to communicate a response to the notification + Code int +} + +var _ Message = &Test{} + +func (v Verification) Type() MessageType { return MessageTypeVerification } +func (v Verification) ID() string { return v.CallbackID } +func (v Verification) Destination() Dest { return v.Dest } +func (v Verification) Body() string { return "" } +func (v Verification) ExtendedBody() string { return "" } +func (v Verification) SubjectID() int { return v.Code } diff --git a/notificationchannel/channel.go b/notificationchannel/channel.go new file mode 100644 index 0000000000..8b54e8d272 --- /dev/null +++ b/notificationchannel/channel.go @@ -0,0 +1,33 @@ +package notificationchannel + +import ( + "github.com/target/goalert/validation/validate" + + uuid "github.com/satori/go.uuid" +) + +type Channel struct { + ID string + Name string + Type Type + Value string +} + +func (c Channel) Normalize() (*Channel, error) { + if c.ID == "" { + c.ID = uuid.NewV4().String() + } + + err := validate.Many( + validate.UUID("ID", c.ID), + validate.Text("Name", c.Name, 1, 
255), + validate.OneOf("Type", c.Type, TypeSlack), + ) + + switch c.Type { + case TypeSlack: + err = validate.Many(err, validate.RequiredText("Value", c.Value, 1, 32)) + } + + return &c, err +} diff --git a/notificationchannel/store.go b/notificationchannel/store.go new file mode 100644 index 0000000000..2ec908daa4 --- /dev/null +++ b/notificationchannel/store.go @@ -0,0 +1,130 @@ +package notificationchannel + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + + "github.com/lib/pq" +) + +type Store interface { + FindAll(context.Context) ([]Channel, error) + FindOne(context.Context, string) (*Channel, error) + CreateTx(context.Context, *sql.Tx, *Channel) (*Channel, error) + DeleteManyTx(context.Context, *sql.Tx, []string) error +} + +type DB struct { + db *sql.DB + + findAll *sql.Stmt + findOne *sql.Stmt + create *sql.Stmt + deleteMany *sql.Stmt +} + +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + db: db, + + findAll: p.P(` + select id, name, type, value from notification_channels + `), + findOne: p.P(` + select id, name, type, value from notification_channels where id = $1 + `), + create: p.P(` + insert into notification_channels (id, name, type, value) + values ($1, $2, $3, $4) + `), + deleteMany: p.P(`DELETE FROM notification_channels WHERE id = any($1)`), + }, p.Err +} + +func (db *DB) CreateTx(ctx context.Context, tx *sql.Tx, c *Channel) (*Channel, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.User) + if err != nil { + return nil, err + } + + n, err := c.Normalize() + if err != nil { + return nil, err + } + + _, err = tx.StmtContext(ctx, db.create).ExecContext(ctx, n.ID, n.Name, n.Type, n.Value) + if err != nil { + return nil, err + } + + return n, nil +} + +func (db *DB) DeleteManyTx(ctx context.Context, tx *sql.Tx, ids []string) 
error { + err := permission.LimitCheckAny(ctx, permission.System, permission.User) + if err != nil { + return err + } + + err = validate.Range("Count", len(ids), 1, 100) + if err != nil { + return err + } + + del := db.deleteMany + if tx != nil { + tx.StmtContext(ctx, del) + } + + _, err = del.ExecContext(ctx, pq.StringArray(ids)) + return err +} + +func (db *DB) FindOne(ctx context.Context, id string) (*Channel, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.User) + if err != nil { + return nil, err + } + + err = validate.UUID("ChannelID", id) + if err != nil { + return nil, err + } + + var c Channel + err = db.findOne.QueryRowContext(ctx, id).Scan(&c.ID, &c.Name, &c.Type, &c.Value) + if err != nil { + return nil, err + } + return &c, nil +} + +func (db *DB) FindAll(ctx context.Context) ([]Channel, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.User) + if err != nil { + return nil, err + } + + rows, err := db.findAll.QueryContext(ctx) + if err != nil { + return nil, err + } + defer rows.Close() + + var channels []Channel + for rows.Next() { + var c Channel + err = rows.Scan(&c.ID, &c.Name, &c.Type, &c.Value) + if err != nil { + return nil, err + } + channels = append(channels, c) + } + + return channels, nil +} diff --git a/notificationchannel/type.go b/notificationchannel/type.go new file mode 100644 index 0000000000..aac81590af --- /dev/null +++ b/notificationchannel/type.go @@ -0,0 +1,34 @@ +package notificationchannel + +import ( + "fmt" + "github.com/target/goalert/notification" +) + +type Type string + +const ( + TypeSlack Type = "SLACK" +) + +func (t Type) DestType() notification.DestType { + switch t { + case TypeSlack: + return notification.DestTypeSlackChannel + } + return 0 +} + +// Scan handles reading a Type from the DB format +func (r *Type) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *r = Type(t) + case string: + *r = Type(t) + default: + return 
fmt.Errorf("could not process unknown type for channel type %T", t) + } + + return nil +} diff --git a/oncall/pool.go b/oncall/pool.go new file mode 100644 index 0000000000..3bec1969db --- /dev/null +++ b/oncall/pool.go @@ -0,0 +1,41 @@ +package oncall + +import ( + "sync" +) + +var ( + activeMapPool = &sync.Pool{ + New: func() interface{} { return make(map[string]struct{}, 20) }, + } + overrideMapPool = &sync.Pool{ + New: func() interface{} { return make(map[string]string, 20) }, + } + shiftMapPool = &sync.Pool{ + New: func() interface{} { return make(map[string]*Shift, 20) }, + } +) + +func getShiftMap() map[string]*Shift { return shiftMapPool.Get().(map[string]*Shift) } +func putShiftMap(m map[string]*Shift) { + for k := range m { + delete(m, k) + } + shiftMapPool.Put(m) +} + +func getActiveMap() map[string]struct{} { return activeMapPool.Get().(map[string]struct{}) } +func putActiveMap(m map[string]struct{}) { + for k := range m { + delete(m, k) + } + activeMapPool.Put(m) +} + +func getOverrideMap() map[string]string { return overrideMapPool.Get().(map[string]string) } +func putOverrideMap(m map[string]string) { + for k := range m { + delete(m, k) + } + overrideMapPool.Put(m) +} diff --git a/oncall/state.go b/oncall/state.go new file mode 100644 index 0000000000..42bcba3e12 --- /dev/null +++ b/oncall/state.go @@ -0,0 +1,237 @@ +package oncall + +import ( + "github.com/target/goalert/assignment" + "github.com/target/goalert/override" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "sort" + "time" +) + +type resolvedRule struct { + rule.Rule + Rotation *resolvedRotation +} +type resolvedRotation struct { + rotation.Rotation + CurrentIndex int + CurrentStart time.Time + CurrentEnd time.Time + Users []string +} + +type state struct { + rules []resolvedRule + overrides []override.UserOverride + history []Shift + now time.Time + loc *time.Location +} + +func (r *resolvedRotation) UserID(t time.Time) string { + 
if r == nil || len(r.Users) == 0 { + return "" + } + if len(r.Users) == 1 { + return r.Users[0] + } + + if r.CurrentEnd.IsZero() { + r.CurrentStart = r.StartTime(r.CurrentStart) + r.CurrentEnd = r.EndTime(r.CurrentStart) + } + + if t.Before(r.CurrentEnd) && !t.Before(r.CurrentStart) { + return r.Users[r.CurrentIndex] + } + + for !t.Before(r.CurrentEnd) { + r.CurrentStart = r.CurrentEnd + r.CurrentEnd = r.EndTime(r.CurrentStart) + r.CurrentIndex++ + } + for t.Before(r.CurrentStart) { + r.CurrentEnd = r.CurrentStart + r.CurrentStart = r.StartTime(r.CurrentStart.Add(-1)) + r.CurrentIndex-- + } + r.CurrentIndex %= len(r.Users) + if r.CurrentIndex < 0 { + r.CurrentIndex += len(r.Users) + } + + return r.Users[r.CurrentIndex] +} +func (r resolvedRule) UserID(t time.Time) string { + if !r.IsActive(t) { + return "" + } + switch r.Target.TargetType() { + case assignment.TargetTypeUser: + return r.Target.TargetID() + case assignment.TargetTypeRotation: + return r.Rotation.UserID(t) + } + panic("unknown target type " + r.Target.TargetType().String()) +} + +// trimShifts deletes and returns any shifts in the map that would not be on call at the given timestamp. 
+// If addNew is true: +// - Existing shifts will have their End time updated with t (if on-call at t) +// - New shifts will be added (with Start and End set to t) if on-call at t +func (s *state) trimShifts(t time.Time, m map[string]*Shift, addNew bool, appendTo []Shift) []Shift { + active := getActiveMap() + defer putActiveMap(active) + + ovMap := getOverrideMap() + defer putOverrideMap(ovMap) + for _, ov := range s.overrides { + if !ov.End.After(t) { + continue + } + if ov.Start.After(t) { + continue + } + if ov.RemoveUserID == "" { + active[ov.AddUserID] = struct{}{} + } else { + ovMap[ov.RemoveUserID] = ov.AddUserID + } + } + for _, r := range s.rules { + userID := r.UserID(t) + if userID == "" { + continue + } + + if nextUser, ok := ovMap[userID]; ok { + userID = nextUser + } + if userID == "" { + continue + } + + active[userID] = struct{}{} + } + + for userID := range active { + s, ok := m[userID] + if !ok && addNew { + m[userID] = &Shift{ + UserID: userID, + Start: t, + End: t, + } + } else if ok { + if t.After(s.End) { + s.End = t + } + if t.Before(s.Start) { + s.Start = t + } + } + } + + for userID, s := range m { + _, ok := active[userID] + if !ok { + if addNew { + s.End = t + } + appendTo = append(appendTo, *s) + delete(m, userID) + } + } + + return appendTo +} +func (s *state) sanitize() { + s.now = s.now.Truncate(time.Minute).In(s.loc) + + for i, o := range s.overrides { + o.Start = o.Start.Truncate(time.Minute) + o.End = o.End.Truncate(time.Minute) + s.overrides[i] = o + } + for i, h := range s.history { + h.End = h.End.Truncate(time.Minute) + h.Start = h.Start.Truncate(time.Minute) + s.history[i] = h + } + for _, r := range s.rules { + if r.Rotation != nil { + r.Rotation.Start = r.Rotation.Start.Truncate(time.Minute) + r.Rotation.CurrentStart = r.Rotation.CurrentStart.Truncate(time.Minute) + r.Rotation.CurrentEnd = r.Rotation.CurrentEnd.Truncate(time.Minute) + } + } +} +func (s *state) CalculateShifts(start, end time.Time) []Shift { + start = 
start.In(s.loc).Truncate(time.Minute) + end = end.In(s.loc).Truncate(time.Minute) + s.sanitize() + if !start.After(s.now) { + start = s.now.Add(time.Minute) + } + + curShifts := getShiftMap() + defer putShiftMap(curShifts) + + s.trimShifts(start, curShifts, true, nil) + + shifts := make([]Shift, 0, 100) + t := start + for len(curShifts) > 0 && t.After(s.now) { + t = t.Add(-time.Minute) + shifts = s.trimShifts(t, curShifts, false, shifts) + } + + historyShifts := make([]Shift, 0, len(s.history)) + activeShifts := make(map[string]Shift, len(s.history)) + for _, s := range s.history { + if s.End.IsZero() { + activeShifts[s.UserID] = s + } else { + historyShifts = append(historyShifts, s) + } + } + + // start time <= now + for userID, s := range curShifts { + if act, ok := activeShifts[userID]; ok { + // We have a record of this shift, so use the provided start time. + // If we don't, then it is not active 'now' but a rule says it will be + // in the next minute (so we leave it as-is). + s.Start = act.Start + } + shifts = append(shifts, *s) + delete(curShifts, userID) + } + + for _, s := range shifts { + cpy := s + curShifts[s.UserID] = &cpy + } + + shifts = append(shifts[:0], historyShifts...) 
+ t = start + for !t.After(end) { + t = t.Add(time.Minute) + shifts = s.trimShifts(t, curShifts, true, shifts) + } + for _, s := range curShifts { // still active @ +1 minute + s.End = s.End.Add(-time.Minute) + s.Truncated = true + shifts = append(shifts, *s) + } + + sort.Slice(shifts, func(i, j int) bool { + if !shifts[i].Start.Equal(shifts[j].Start) { + return shifts[i].Start.Before(shifts[j].Start) + } + return shifts[i].UserID < shifts[j].UserID + }) + + return shifts +} diff --git a/oncall/state_test.go b/oncall/state_test.go new file mode 100644 index 0000000000..fe6091ba74 --- /dev/null +++ b/oncall/state_test.go @@ -0,0 +1,610 @@ +package oncall + +import ( + "github.com/target/goalert/assignment" + "github.com/target/goalert/override" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "testing" + "time" +) + +func BenchmarkState_CalculateShifts(b *testing.B) { + s := &state{ + loc: time.UTC, + rules: []resolvedRule{ + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.UserTarget("foobar"), + }}, + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.UserTarget("foobar"), + }}, + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 0, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.UserTarget("foobar"), + }}, + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 0, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.UserTarget("foobar2"), + }}, + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.UserTarget("fooba4r"), + }}, + { + Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 
1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.RotationTarget("fooba4r"), + }, + Rotation: &resolvedRotation{ + Rotation: rotation.Rotation{ + Type: rotation.TypeDaily, + ShiftLength: 2, + Start: time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC), + }, + Users: []string{"a", "b", "c", "d", "e"}, + }, + }, + { + Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.RotationTarget("fooba4r"), + }, + Rotation: &resolvedRotation{ + Rotation: rotation.Rotation{ + Type: rotation.TypeDaily, + ShiftLength: 2, + Start: time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC), + }, + Users: []string{"a", "b", "c", "d", "e"}, + }, + }, + { + Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.RotationTarget("fooba4r"), + }, + Rotation: &resolvedRotation{ + Rotation: rotation.Rotation{ + Type: rotation.TypeDaily, + ShiftLength: 2, + Start: time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC), + }, + Users: []string{"a", "b", "c", "d", "e"}, + }, + }, + }, + overrides: []override.UserOverride{ + override.UserOverride{ + AddUserID: "binbaz", + RemoveUserID: "foobar", + Start: time.Date(2018, 1, 1, 8, 30, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 8, 45, 0, 0, time.UTC), + }, + override.UserOverride{ + AddUserID: "binbaz2", + RemoveUserID: "foobar", + Start: time.Date(2018, 1, 1, 8, 30, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 8, 45, 0, 0, time.UTC), + }, + override.UserOverride{ + AddUserID: "binbaz", + RemoveUserID: "foob3ar", + Start: time.Date(2018, 1, 1, 8, 30, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 8, 45, 0, 0, time.UTC), + }, + }, + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.CalculateShifts( + time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), // 8:00AM + time.Date(2018, 1, 2, 8, 0, 0, 0, time.UTC), // 9:00AM + ) + } +} + +func 
TestResolvedRotation_UserID(t *testing.T) { + rot := &resolvedRotation{ + Rotation: rotation.Rotation{ + ID: "rot", + Type: rotation.TypeWeekly, + Start: time.Date(2018, 4, 21, 14, 8, 9, 168379000, time.UTC), + ShiftLength: 6, + }, + CurrentIndex: 0, + CurrentStart: time.Date(2018, 8, 30, 21, 54, 38, 334304000, time.UTC), + Users: []string{ + "Javon Goodwin", + "Nora Bode", + "Coby Blanda", + "Clyde Reinger", + "Justina Moen", + "Herman Donnelly", + "Timmothy OReilly", + "Alvis Upton", + "Name Bayer", + "Daron Hirthe", + "Ismael Goodwin", + "Andrew Lockman", + "Adalberto Dare", + "Liliana Moen", + "Brant Abbott", + "Nia Purdy", + "Modesto Nolan", + "Angelica Leannon", + "Cleo Heaney", + "Osborne Batz", + "Lyda Christiansen", + "Loyal Green", + "Mose Lindgren", + "Camilla Stehr", + }, + } + + id := rot.UserID(time.Date(2018, 9, 10, 2, 44, 0, 0, time.UTC)) + if id != "Javon Goodwin" { + t.Fatalf("got '%s'; want '%s'", id, "Javon Goodwin") + } +} + +func TestState_CalculateShifts(t *testing.T) { + check := func(name string, start, end time.Time, s *state, exp []Shift) { + t.Run(name, func(t *testing.T) { + res := s.CalculateShifts(start, end) + for i, exp := range exp { + if i >= len(res) { + t.Errorf("shift[%d]: missing", i) + continue + } + if !res[i].Start.Equal(exp.Start) { + t.Errorf("shift[%d]: start = %s; want %s", i, res[i].Start.In(exp.Start.Location()), exp.Start) + } + if !res[i].End.Equal(exp.End) { + t.Errorf("shift[%d]: end = %s; want %s", i, res[i].End.In(exp.End.Location()), exp.End) + } + if res[i].Truncated != exp.Truncated { + t.Errorf("shift[%d]: truncated = %t; want %t", i, res[i].Truncated, exp.Truncated) + } + if res[i].UserID != exp.UserID { + t.Errorf("shift[%d]: userID = %s; want %s", i, res[i].UserID, exp.UserID) + } + } + if len(res) > len(exp) { + for _, res := range res[len(exp):] { + t.Errorf("extra shift: %v", res) + } + } + }) + } + + check("Simple", + time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), // 8:00AM + time.Date(2018, 1, 1, 9, 0, 
0, 0, time.UTC), // 9:00AM + &state{ + loc: time.UTC, + rules: []resolvedRule{ + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.UserTarget("foobar"), + }}, + }, + }, + []Shift{ + { + Start: time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 9, 0, 0, 0, time.UTC), + Truncated: true, + UserID: "foobar", + }, + }, + ) + + check("SimpleWeek", + time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2018, 1, 8, 0, 0, 0, 0, time.UTC), + &state{ + loc: time.UTC, + rules: []resolvedRule{ + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(9, 0), + Target: assignment.UserTarget("foobar"), + }}, + }, + }, + []Shift{ + { + Start: time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 9, 0, 0, 0, time.UTC), + UserID: "foobar", + }, + { + Start: time.Date(2018, 1, 2, 8, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 2, 9, 0, 0, 0, time.UTC), + UserID: "foobar", + }, + { + Start: time.Date(2018, 1, 3, 8, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 3, 9, 0, 0, 0, time.UTC), + UserID: "foobar", + }, + { + Start: time.Date(2018, 1, 4, 8, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 4, 9, 0, 0, 0, time.UTC), + UserID: "foobar", + }, + { + Start: time.Date(2018, 1, 5, 8, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 5, 9, 0, 0, 0, time.UTC), + UserID: "foobar", + }, + { + Start: time.Date(2018, 1, 6, 8, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 6, 9, 0, 0, 0, time.UTC), + UserID: "foobar", + }, + { + Start: time.Date(2018, 1, 7, 8, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 7, 9, 0, 0, 0, time.UTC), + UserID: "foobar", + }, + }, + ) + + check("ReplaceOverride", + time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), // 8:00AM + time.Date(2018, 1, 1, 9, 0, 0, 0, time.UTC), // 9:00AM + &state{ + loc: time.UTC, + rules: []resolvedRule{ + {Rule: rule.Rule{ + 
WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.UserTarget("foobar"), + }}, + }, + overrides: []override.UserOverride{ + override.UserOverride{ + AddUserID: "binbaz", + RemoveUserID: "foobar", + Start: time.Date(2018, 1, 1, 8, 30, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 8, 45, 0, 0, time.UTC), + }, + }, + }, + []Shift{ + { + Start: time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 8, 30, 0, 0, time.UTC), + Truncated: false, + UserID: "foobar", + }, + { + Start: time.Date(2018, 1, 1, 8, 30, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 8, 45, 0, 0, time.UTC), + Truncated: false, + UserID: "binbaz", + }, + { + Start: time.Date(2018, 1, 1, 8, 45, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 9, 0, 0, 0, time.UTC), + Truncated: true, + UserID: "foobar", + }, + }, + ) + + check("AddOverride", + time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), // 8:00AM + time.Date(2018, 1, 1, 9, 0, 0, 0, time.UTC), // 9:00AM + &state{ + loc: time.UTC, + rules: []resolvedRule{ + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.UserTarget("foobar"), + }}, + }, + overrides: []override.UserOverride{ + override.UserOverride{ + AddUserID: "binbaz", + Start: time.Date(2018, 1, 1, 8, 30, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 8, 45, 0, 0, time.UTC), + }, + }, + }, + []Shift{ + { + Start: time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 9, 0, 0, 0, time.UTC), + Truncated: true, + UserID: "foobar", + }, + { + Start: time.Date(2018, 1, 1, 8, 30, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 8, 45, 0, 0, time.UTC), + Truncated: false, + UserID: "binbaz", + }, + }, + ) + + check("RemoveOverride", + time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), // 8:00AM + time.Date(2018, 1, 1, 9, 0, 0, 0, time.UTC), // 9:00AM + &state{ + loc: time.UTC, + rules: 
[]resolvedRule{ + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.UserTarget("foobar"), + }}, + }, + overrides: []override.UserOverride{ + override.UserOverride{ + RemoveUserID: "foobar", + Start: time.Date(2018, 1, 1, 8, 30, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 8, 45, 0, 0, time.UTC), + }, + }, + }, + []Shift{ + { + Start: time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 8, 30, 0, 0, time.UTC), + Truncated: false, + UserID: "foobar", + }, + { + Start: time.Date(2018, 1, 1, 8, 45, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 9, 0, 0, 0, time.UTC), + Truncated: true, + UserID: "foobar", + }, + }, + ) + + check("History", + time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), // 8:00AM + time.Date(2018, 1, 1, 9, 0, 0, 0, time.UTC), // 9:00AM + &state{ + now: time.Date(2018, 1, 1, 8, 0, 0, 0, time.UTC), + loc: time.UTC, + history: []Shift{ + { + UserID: "foobar", + Start: time.Date(2018, 1, 1, 7, 0, 0, 0, time.UTC), // user actually started at 7 + }, + }, + rules: []resolvedRule{ + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(10, 0), + Target: assignment.UserTarget("foobar"), + }}, + }, + }, + []Shift{ + { + Start: time.Date(2018, 1, 1, 7, 0, 0, 0, time.UTC), + End: time.Date(2018, 1, 1, 9, 0, 0, 0, time.UTC), + Truncated: true, + UserID: "foobar", + }, + }, + ) + + check("Rotation", + time.Date(2018, 9, 10, 0, 0, 0, 0, time.UTC), // 8:00AM + time.Date(2018, 9, 17, 0, 0, 0, 0, time.UTC), // 9:00AM + &state{ + now: time.Date(2018, 9, 10, 14, 44, 0, 0, time.UTC), + loc: time.UTC, + history: []Shift{ + { + UserID: "Javon Goodwin", + Start: time.Date(2018, 9, 10, 2, 25, 0, 0, time.UTC), + End: time.Date(2018, 9, 10, 5, 29, 0, 0, time.UTC), + }, + }, + rules: []resolvedRule{ + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 0, 0, 0}, + Start: 
rule.NewClock(19, 40), + End: rule.NewClock(22, 53), + Target: assignment.RotationTarget("rot"), + }, + Rotation: &resolvedRotation{ + Rotation: rotation.Rotation{ + ID: "rot", + Type: rotation.TypeWeekly, + Start: time.Date(2018, 4, 21, 14, 8, 9, 168379000, time.UTC), + ShiftLength: 6, + }, + CurrentIndex: 0, + CurrentStart: time.Date(2018, 8, 30, 21, 54, 38, 334304000, time.UTC), + Users: []string{ + "Javon Goodwin", + "Nora Bode", + "Coby Blanda", + "Clyde Reinger", + "Justina Moen", + "Herman Donnelly", + "Timmothy OReilly", + "Alvis Upton", + "Name Bayer", + "Daron Hirthe", + "Ismael Goodwin", + "Andrew Lockman", + "Adalberto Dare", + "Liliana Moen", + "Brant Abbott", + "Nia Purdy", + "Modesto Nolan", + "Angelica Leannon", + "Cleo Heaney", + "Osborne Batz", + "Lyda Christiansen", + "Loyal Green", + "Mose Lindgren", + "Camilla Stehr", + }, + }, + }, + }, + }, + []Shift{ + { + Start: time.Date(2018, 9, 10, 2, 25, 0, 0, time.UTC), + End: time.Date(2018, 9, 10, 5, 29, 0, 0, time.UTC), + Truncated: false, + UserID: "Javon Goodwin", + }, + { + Start: time.Date(2018, 9, 10, 19, 40, 0, 0, time.UTC), + End: time.Date(2018, 9, 10, 22, 53, 0, 0, time.UTC), + Truncated: false, + UserID: "Javon Goodwin", + }, + { + Start: time.Date(2018, 9, 11, 19, 40, 0, 0, time.UTC), + End: time.Date(2018, 9, 11, 22, 53, 0, 0, time.UTC), + Truncated: false, + UserID: "Javon Goodwin", + }, + { + Start: time.Date(2018, 9, 12, 19, 40, 0, 0, time.UTC), + End: time.Date(2018, 9, 12, 22, 53, 0, 0, time.UTC), + Truncated: false, + UserID: "Javon Goodwin", + }, + { + Start: time.Date(2018, 9, 16, 19, 40, 0, 0, time.UTC), + End: time.Date(2018, 9, 16, 22, 53, 0, 0, time.UTC), + Truncated: false, + UserID: "Javon Goodwin", + }, + }, + ) + + central, err := time.LoadLocation("America/Chicago") + if err != nil { + t.Fatal(err) + } + check( + "DailyRotation", + time.Date(2018, 9, 10, 0, 0, 0, 0, time.UTC), + time.Date(2018, 9, 17, 0, 0, 0, 0, time.UTC), + &state{ + now: time.Date(2018, 9, 12, 10, 25, 
0, 0, central), + loc: central, + history: []Shift{ + {UserID: "Craig", Start: time.Date(2018, 9, 12, 13, 5, 0, 0, time.UTC)}, + {UserID: "Caza", Start: time.Date(2018, 9, 12, 13, 0, 3, 0, time.UTC), End: time.Date(2018, 9, 12, 13, 1, 3, 0, time.UTC)}, + {UserID: "Cook", Start: time.Date(2018, 9, 12, 1, 0, 3, 0, time.UTC), End: time.Date(2018, 9, 12, 13, 0, 3, 0, time.UTC)}, + {UserID: "Aru", Start: time.Date(2018, 9, 11, 13, 0, 1, 0, time.UTC), End: time.Date(2018, 9, 12, 1, 1, 3, 0, time.UTC)}, + {UserID: "Caza", Start: time.Date(2018, 9, 11, 1, 0, 1, 0, time.UTC), End: time.Date(2018, 9, 11, 13, 0, 1, 0, time.UTC)}, + {UserID: "Donna", Start: time.Date(2018, 9, 10, 13, 0, 0, 0, time.UTC), End: time.Date(2018, 9, 11, 1, 0, 1, 0, time.UTC)}, + {UserID: "Cook", Start: time.Date(2018, 9, 10, 1, 0, 0, 0, time.UTC), End: time.Date(2018, 9, 10, 13, 0, 0, 0, time.UTC)}, + {UserID: "Craig", Start: time.Date(2018, 9, 9, 13, 0, 0, 0, time.UTC), End: time.Date(2018, 9, 10, 1, 0, 0, 0, time.UTC)}, + }, + rules: []resolvedRule{ + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(8, 0), + End: rule.NewClock(20, 0), + Target: assignment.RotationTarget("rot-day"), + }, + Rotation: &resolvedRotation{ + Rotation: rotation.Rotation{ + ID: "rot-day", + Type: rotation.TypeDaily, + Start: time.Date(2018, 6, 15, 13, 0, 0, 0, time.UTC).In(central), + ShiftLength: 1, + }, + CurrentIndex: 2, + CurrentStart: time.Date(2018, 9, 12, 13, 0, 3, 0, time.UTC), + Users: []string{ + "Donna", + "Aru", + "Craig", + }, + }, + }, + {Rule: rule.Rule{ + WeekdayFilter: rule.WeekdayFilter{1, 1, 1, 1, 1, 1, 1}, + Start: rule.NewClock(20, 0), + End: rule.NewClock(8, 0), + Target: assignment.RotationTarget("rot-night"), + }, + Rotation: &resolvedRotation{ + Rotation: rotation.Rotation{ + ID: "rot-night", + Type: rotation.TypeDaily, + Start: time.Date(2018, 6, 15, 13, 0, 0, 0, time.UTC), + ShiftLength: 1, + }, + CurrentIndex: 0, + CurrentStart: time.Date(2018, 
9, 12, 13, 0, 3, 0, time.UTC), + Users: []string{ + "Caza", + "Cook", + }, + }, + }, + }, + }, + []Shift{ + // history shifts + {UserID: "Craig", Start: time.Date(2018, 9, 9, 13, 0, 0, 0, time.UTC), End: time.Date(2018, 9, 10, 1, 0, 0, 0, time.UTC)}, + {UserID: "Cook", Start: time.Date(2018, 9, 10, 1, 0, 0, 0, time.UTC), End: time.Date(2018, 9, 10, 13, 0, 0, 0, time.UTC)}, + {UserID: "Donna", Start: time.Date(2018, 9, 10, 13, 0, 0, 0, time.UTC), End: time.Date(2018, 9, 11, 1, 0, 0, 0, time.UTC)}, + {UserID: "Caza", Start: time.Date(2018, 9, 11, 1, 0, 0, 0, time.UTC), End: time.Date(2018, 9, 11, 13, 0, 0, 0, time.UTC)}, + {UserID: "Aru", Start: time.Date(2018, 9, 11, 13, 0, 0, 0, time.UTC), End: time.Date(2018, 9, 12, 1, 1, 0, 0, time.UTC)}, + {UserID: "Cook", Start: time.Date(2018, 9, 12, 1, 0, 0, 0, time.UTC), End: time.Date(2018, 9, 12, 13, 0, 0, 0, time.UTC)}, + {UserID: "Caza", Start: time.Date(2018, 9, 12, 13, 0, 0, 0, time.UTC), End: time.Date(2018, 9, 12, 13, 1, 0, 0, time.UTC)}, + + // in-progress + {UserID: "Craig", Start: time.Date(2018, 9, 12, 13, 5, 0, 0, time.UTC), End: time.Date(2018, 9, 12, 20, 0, 0, 0, central)}, + + // future + {UserID: "Caza", Start: time.Date(2018, 9, 12, 20, 0, 0, 0, central), End: time.Date(2018, 9, 13, 8, 0, 0, 0, central)}, + {UserID: "Donna", Start: time.Date(2018, 9, 13, 8, 0, 0, 0, central), End: time.Date(2018, 9, 13, 20, 0, 0, 0, central)}, + {UserID: "Cook", Start: time.Date(2018, 9, 13, 20, 0, 0, 0, central), End: time.Date(2018, 9, 14, 8, 0, 0, 0, central)}, + {UserID: "Aru", Start: time.Date(2018, 9, 14, 8, 0, 0, 0, central), End: time.Date(2018, 9, 14, 20, 0, 0, 0, central)}, + {UserID: "Caza", Start: time.Date(2018, 9, 14, 20, 0, 0, 0, central), End: time.Date(2018, 9, 15, 8, 0, 0, 0, central)}, + {UserID: "Craig", Start: time.Date(2018, 9, 15, 8, 0, 0, 0, central), End: time.Date(2018, 9, 15, 20, 0, 0, 0, central)}, + {UserID: "Cook", Start: time.Date(2018, 9, 15, 20, 0, 0, 0, central), End: time.Date(2018, 9, 16, 
8, 0, 0, 0, central)}, + {UserID: "Donna", Start: time.Date(2018, 9, 16, 8, 0, 0, 0, central), End: time.Date(2018, 9, 16, 19, 0, 0, 0, central), Truncated: true}, + }, + ) + +} diff --git a/oncall/store.go b/oncall/store.go new file mode 100644 index 0000000000..6a060c436f --- /dev/null +++ b/oncall/store.go @@ -0,0 +1,290 @@ +package oncall + +import ( + "context" + "database/sql" + "github.com/target/goalert/assignment" + "github.com/target/goalert/override" + "github.com/target/goalert/permission" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + "time" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// Store allows retrieving and calculating on-call information. +type Store interface { + OnCallUsersByService(ctx context.Context, serviceID string) ([]ServiceOnCallUser, error) + + // HistoryBySchedule(ctx context.Context, stepID string, start, end time.Time) ([]Shift, error) + HistoryBySchedule(ctx context.Context, scheduleID string, start, end time.Time) ([]Shift, error) +} + +// ServiceOnCallUser represents a currently on-call user for a service. +type ServiceOnCallUser struct { + StepNumber int `json:"step_number"` + UserID string `json:"user_id"` + UserName string `json:"user_name"` +} + +// A Shift represents a duration a user is on-call. +// If truncated is true, then the End time does not represent +// the time the user stopped being on call, instead it indicates +// they were still on-call at that time. +type Shift struct { + UserID string `json:"user_id"` + Start time.Time `json:"start_time"` + End time.Time `json:"end_time"` + Truncated bool `json:"truncated"` +} + +// DB implements the Store interface from Postgres. 
+type DB struct { + db *sql.DB + + onCallUsersSvc *sql.Stmt + schedOverrides *sql.Stmt + + schedOnCall *sql.Stmt + schedTZ *sql.Stmt + schedRot *sql.Stmt + rotParts *sql.Stmt + + ruleStore rule.Store +} + +// NewDB will create a new DB, preparing required statements using the provided context. +func NewDB(ctx context.Context, db *sql.DB, ruleStore rule.Store) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + db: db, + ruleStore: ruleStore, + + schedOverrides: p.P(` + select + start_time, + end_time, + add_user_id, + remove_user_id + from user_overrides + where + tgt_schedule_id = $1 and + end_time > now() and + ($2, $3) OVERLAPS(start_time, end_time) + `), + + onCallUsersSvc: p.P(` + select step.step_number, oc.user_id, u.name as user_name + from services svc + join escalation_policy_steps step on step.escalation_policy_id = svc.escalation_policy_id + join ep_step_on_call_users oc on oc.ep_step_id = step.id and oc.end_time isnull + join users u on oc.user_id = u.id + where svc.id = $1 + order by step.step_number, oc.start_time + `), + + schedOnCall: p.P(` + select + user_id, + start_time, + end_time + from schedule_on_call_users + where + schedule_id = $1 and + ($2, $3) OVERLAPS (start_time, coalesce(end_time, 'infinity')) and + (end_time isnull or (end_time - start_time) > '1 minute'::interval) + `), + schedTZ: p.P(`select time_zone, now() from schedules where id = $1`), + schedRot: p.P(` + select distinct + rot.id, + rot.type, + rot.start_time, + rot.shift_length, + rot.time_zone, + state.position, + state.shift_start + from schedule_rules rule + join rotations rot on rot.id = rule.tgt_rotation_id + join rotation_state state on state.rotation_id = rule.tgt_rotation_id + where rule.schedule_id = $1 and rule.tgt_rotation_id notnull + `), + rotParts: p.P(` + select + rotation_id, + user_id + from rotation_participants + where rotation_id = any($1) + order by + rotation_id, + position + `), + }, p.Err +} + +// OnCallUsersByService will return 
the current set of users who are on-call for the given service. +func (db *DB) OnCallUsersByService(ctx context.Context, serviceID string) ([]ServiceOnCallUser, error) { + err := permission.LimitCheckAny(ctx, permission.User) + if err != nil { + return nil, err + } + err = validate.UUID("ServiceID", serviceID) + if err != nil { + return nil, err + } + rows, err := db.onCallUsersSvc.QueryContext(ctx, serviceID) + if err != nil { + return nil, err + } + defer rows.Close() + var onCall []ServiceOnCallUser + for rows.Next() { + var u ServiceOnCallUser + err = rows.Scan(&u.StepNumber, &u.UserID, &u.UserName) + if err != nil { + return nil, err + } + onCall = append(onCall, u) + } + return onCall, nil +} + +// HistoryBySchedule will return the list of shifts that overlap the start and end time for the given schedule. +func (db *DB) HistoryBySchedule(ctx context.Context, scheduleID string, start, end time.Time) ([]Shift, error) { + err := permission.LimitCheckAny(ctx, permission.User) + if err != nil { + return nil, err + } + err = validate.UUID("ScheduleID", scheduleID) + if err != nil { + return nil, err + } + + tx, err := db.db.BeginTx(ctx, &sql.TxOptions{ + ReadOnly: true, + Isolation: sql.LevelRepeatableRead, + }) + if err != nil { + return nil, errors.Wrap(err, "begin transaction") + } + defer tx.Rollback() + + var schedTZ string + var now time.Time + err = tx.StmtContext(ctx, db.schedTZ).QueryRowContext(ctx, scheduleID).Scan(&schedTZ, &now) + if err != nil { + return nil, errors.Wrap(err, "lookup schedule time zone") + } + + rows, err := tx.StmtContext(ctx, db.schedRot).QueryContext(ctx, scheduleID) + if err != nil { + return nil, errors.Wrap(err, "lookup schedule rotations") + } + defer rows.Close() + rots := make(map[string]*resolvedRotation) + var rotIDs []string + for rows.Next() { + var rot resolvedRotation + var rotTZ string + err = rows.Scan(&rot.ID, &rot.Type, &rot.Start, &rot.ShiftLength, &rotTZ, &rot.CurrentIndex, &rot.CurrentStart) + if err != nil { + 
return nil, errors.Wrap(err, "scan rotation info") + } + loc, err := util.LoadLocation(rotTZ) + if err != nil { + return nil, errors.Wrap(err, "load time zone info") + } + rot.Start = rot.Start.In(loc) + rots[rot.ID] = &rot + rotIDs = append(rotIDs, rot.ID) + } + + rows, err = tx.StmtContext(ctx, db.rotParts).QueryContext(ctx, pq.StringArray(rotIDs)) + if err != nil { + return nil, errors.Wrap(err, "lookup rotation participants") + } + defer rows.Close() + for rows.Next() { + var rotID, userID string + err = rows.Scan(&rotID, &userID) + if err != nil { + return nil, errors.Wrap(err, "scan rotation participant info") + } + rots[rotID].Users = append(rots[rotID].Users, userID) + } + + rawRules, err := db.ruleStore.FindAllTx(ctx, tx, scheduleID) + if err != nil { + return nil, errors.Wrap(err, "lookup schedule rules") + } + + var rules []resolvedRule + for _, r := range rawRules { + if r.Target.TargetType() == assignment.TargetTypeRotation { + rules = append(rules, resolvedRule{ + Rule: r, + Rotation: rots[r.Target.TargetID()], + }) + } else { + rules = append(rules, resolvedRule{Rule: r}) + } + } + + rows, err = tx.StmtContext(ctx, db.schedOnCall).QueryContext(ctx, scheduleID, start, end) + if err != nil { + return nil, errors.Wrap(err, "lookup on-call history") + } + defer rows.Close() + var userHistory []Shift + for rows.Next() { + var s Shift + var end pq.NullTime + err = rows.Scan(&s.UserID, &s.Start, &end) + if err != nil { + return nil, errors.Wrap(err, "scan on-call history info") + } + s.End = end.Time + userHistory = append(userHistory, s) + } + + rows, err = tx.StmtContext(ctx, db.schedOverrides).QueryContext(ctx, scheduleID, start, end) + if err != nil { + return nil, errors.Wrap(err, "lookup overrides") + } + defer rows.Close() + var overrides []override.UserOverride + for rows.Next() { + var add, rem sql.NullString + var ov override.UserOverride + err = rows.Scan(&ov.Start, &ov.End, &add, &rem) + if err != nil { + return nil, errors.Wrap(err, "scan 
override info") + } + ov.AddUserID = add.String + ov.RemoveUserID = rem.String + overrides = append(overrides, ov) + } + + err = tx.Commit() + if err != nil { + // Can't use the data we read (e.g. serialization error) + return nil, errors.Wrap(err, "commit tx") + } + tz, err := util.LoadLocation(schedTZ) + if err != nil { + return nil, errors.Wrap(err, "load time zone info") + } + s := state{ + rules: rules, + overrides: overrides, + history: userHistory, + now: now, + loc: tz, + } + + return s.CalculateShifts(start, end), nil +} diff --git a/override/override.go b/override/override.go new file mode 100644 index 0000000000..ab149b52c4 --- /dev/null +++ b/override/override.go @@ -0,0 +1,60 @@ +package override + +import ( + "fmt" + "github.com/target/goalert/assignment" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "time" +) + +// A UserOverride is used to add, remove, or change which user is on call. +type UserOverride struct { + ID string `json:"id,omitempty"` + AddUserID string `json:"add_user_id,omitempty"` + RemoveUserID string `json:"remove_user_id,omitempty"` + Start time.Time `json:"start_time,omitempty"` + End time.Time `json:"end_time,omitempty"` + Target assignment.Target +} + +const debugTimeFmt = "MonJan2_2006@3:04pm" + +func (o UserOverride) String() string { + var tgt string + if o.Target != nil { + tgt = ", " + o.Target.TargetType().String() + "(" + o.Target.TargetID() + ")" + } + return fmt.Sprintf("UserOverride{Start: %s, End: %s, AddUserID: %s, RemoveUserID: %s%s}", + o.Start.Local().Format(debugTimeFmt), + o.End.Local().Format(debugTimeFmt), + o.AddUserID, + o.RemoveUserID, + tgt, + ) +} + +// Normalize will validate fields and return a normalized copy. 
+func (o UserOverride) Normalize() (*UserOverride, error) { + var err error + if o.AddUserID == "" && o.RemoveUserID == "" { + err = validation.NewFieldError("UserID", "must specify AddUserID and/or RemoveUserID") + } + if o.AddUserID != "" { + err = validate.Many(err, validate.UUID("AddUserID", o.AddUserID)) + } + if o.RemoveUserID != "" { + err = validate.Many(err, validate.UUID("RemoveUserID", o.RemoveUserID)) + } + if !o.Start.Before(o.End) { + err = validate.Many(err, validation.NewFieldError("End", "must occur after Start time")) + } + err = validate.Many(err, + validate.UUID("TargetID", o.Target.TargetID()), + validate.OneOf("TargetType", o.Target.TargetType(), assignment.TargetTypeSchedule), + ) + if err != nil { + return nil, err + } + return &o, nil +} diff --git a/override/search.go b/override/search.go new file mode 100644 index 0000000000..a2cad8232b --- /dev/null +++ b/override/search.go @@ -0,0 +1,173 @@ +package override + +import ( + "context" + "database/sql" + "text/template" + "time" + + "github.com/target/goalert/assignment" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/validation/validate" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// SearchOptions allow filtering and paginating the list of rotations. +type SearchOptions struct { + After SearchCursor `json:"a,omitempty"` + Limit int `json:"-"` + + // Omit specifies a list of override IDs to exclude from the results. + Omit []string + + ScheduleID string `json:"d,omitempty"` + + AddUserIDs []string `json:"u,omitempty"` + RemoveUserIDs []string `json:"r,omitempty"` + AnyUserIDs []string `json:"n,omitempty"` + + Start time.Time `json:"t,omitempty"` + End time.Time `json:"e,omitempty"` +} + +// SearchCursor is used to indicate a position in a paginated list. 
+type SearchCursor struct { + ID string `json:"i,omitempty"` +} + +var searchTemplate = template.Must(template.New("search").Parse(` + {{if .After.ID}} + WITH after AS ( + SELECT id, start_time, end_time + FROM user_overrides + WHERE id = :afterID + ) + {{end}} + SELECT + o.id, o.start_time, o.end_time, add_user_id, remove_user_id, tgt_schedule_id + FROM user_overrides o + {{if .After.ID}} + JOIN after ON true + {{end}} + WHERE true + {{if .Omit}} + AND not o.id = any(:omit) + {{end}} + {{if .ScheduleID}} + AND tgt_schedule_id = :scheduleID + {{end}} + {{if .AnyUserIDs}} + AND (add_user_id = any(:anyUserIDs) OR remove_user_id = any(:anyUserIDs)) + {{end}} + {{if .AddUserIDs}} + AND add_user_id = any(:addUserIDs) + {{end}} + {{if .RemoveUserIDs}} + AND remove_user_id = any(:removeUserIDs) + {{end}} + {{if not .Start.IsZero}} + AND o.end_time > :startTime + {{end}} + {{if not .End.IsZero}} + AND o.start_time <= :endTime + {{end}} + {{if .After.ID}} + AND ( + o.start_time > after.start_time OR ( + o.start_time = after.start_time AND + o.end_time > after.end_time + ) OR ( + o.start_time = after.start_time AND + o.end_time = after.end_time AND + o.id > after.id + ) + ) + {{end}} + ORDER BY o.start_time, o.end_time, o.id + LIMIT {{.Limit}} +`)) + +type renderData SearchOptions + +func (opts renderData) Normalize() (*renderData, error) { + if opts.Limit == 0 { + opts.Limit = search.DefaultMaxResults + } + + err := validate.Many( + validate.ManyUUID("AddUserIDs", opts.AddUserIDs, 10), + validate.ManyUUID("RemoveUserIDs", opts.RemoveUserIDs, 10), + validate.ManyUUID("AnyUserIDs", opts.RemoveUserIDs, 10), + validate.Range("Limit", opts.Limit, 0, search.MaxResults), + validate.ManyUUID("Omit", opts.Omit, 50), + ) + if opts.ScheduleID != "" { + err = validate.Many(err, validate.UUID("ScheduleID", opts.ScheduleID)) + } + if opts.After.ID != "" { + err = validate.Many(err, validate.UUID("After.ID", opts.After.ID)) + } + + return &opts, err +} + +func (opts renderData) 
QueryArgs() []sql.NamedArg { + return []sql.NamedArg{ + sql.Named("afterID", opts.After.ID), + sql.Named("scheduleID", opts.ScheduleID), + sql.Named("startTime", opts.Start), + sql.Named("endTime", opts.End), + sql.Named("addUserIDs", pq.StringArray(opts.AddUserIDs)), + sql.Named("removeUserIDs", pq.StringArray(opts.RemoveUserIDs)), + sql.Named("anyUserIDs", pq.StringArray(opts.AnyUserIDs)), + sql.Named("omit", pq.StringArray(opts.Omit)), + } +} + +func (db *DB) Search(ctx context.Context, opts *SearchOptions) ([]UserOverride, error) { + err := permission.LimitCheckAny(ctx, permission.User) + if err != nil { + return nil, err + } + if opts == nil { + opts = &SearchOptions{} + } + data, err := (*renderData)(opts).Normalize() + if err != nil { + return nil, err + } + query, args, err := search.RenderQuery(ctx, searchTemplate, data) + if err != nil { + return nil, errors.Wrap(err, "render query") + } + + rows, err := db.db.QueryContext(ctx, query, args...) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var result []UserOverride + var u UserOverride + var add, rem, schedID sql.NullString + for rows.Next() { + err = rows.Scan(&u.ID, &u.Start, &u.End, &add, &rem, &schedID) + if err != nil { + return nil, err + } + u.AddUserID = add.String + u.RemoveUserID = rem.String + if schedID.Valid { + u.Target = assignment.ScheduleTarget(schedID.String) + } + result = append(result, u) + } + + return result, nil +} diff --git a/override/store.go b/override/store.go new file mode 100644 index 0000000000..60f9494322 --- /dev/null +++ b/override/store.go @@ -0,0 +1,284 @@ +package override + +import ( + "context" + "database/sql" + "github.com/target/goalert/assignment" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "time" + + "github.com/lib/pq" + uuid 
"github.com/satori/go.uuid" +) + +// Store is used to manage active overrides. +type Store interface { + CreateUserOverrideTx(context.Context, *sql.Tx, *UserOverride) (*UserOverride, error) + FindOneUserOverrideTx(ctx context.Context, tx *sql.Tx, id string, forUpdate bool) (*UserOverride, error) + DeleteUserOverrideTx(context.Context, *sql.Tx, ...string) error + FindAllUserOverrides(ctx context.Context, start, end time.Time, t assignment.Target) ([]UserOverride, error) + UpdateUserOverride(context.Context, *UserOverride) error + UpdateUserOverrideTx(context.Context, *sql.Tx, *UserOverride) error + Search(context.Context, *SearchOptions) ([]UserOverride, error) +} + +// DB implements the Store interface using a Postgres DB as a backend. +type DB struct { + db *sql.DB + + findUO *sql.Stmt + createUO *sql.Stmt + deleteUO *sql.Stmt + findAllUO *sql.Stmt + updateUO *sql.Stmt + + findUOUpdate *sql.Stmt +} + +// NewDB initializes a new DB using an existing sql connection. +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + db: db, + findUOUpdate: p.P(` + select + id, + add_user_id, + remove_user_id, + start_time, + end_time, + tgt_schedule_id + from user_overrides + where id = $1 + for update + `), + findUO: p.P(` + select + id, + add_user_id, + remove_user_id, + start_time, + end_time, + tgt_schedule_id + from user_overrides + where id = $1 + `), + updateUO: p.P(` + update user_overrides + set + add_user_id = $2, + remove_user_id = $3, + start_time = $4, + end_time = $5, + tgt_schedule_id = $6 + where id = $1 + `), + createUO: p.P(` + insert into user_overrides ( + id, + add_user_id, + remove_user_id, + start_time, + end_time, + tgt_schedule_id + ) values ($1, $2, $3, $4, $5, $6)`), + deleteUO: p.P(`delete from user_overrides where id = any($1)`), + findAllUO: p.P(` + select + id, + add_user_id, + remove_user_id, + start_time, + end_time + from user_overrides + where + tgt_schedule_id = $1 and + 
(start_time, end_time) OVERLAPS ($2, $3) + `), + }, p.Err +} +func wrap(stmt *sql.Stmt, tx *sql.Tx) *sql.Stmt { + if tx == nil { + return stmt + } + return tx.Stmt(stmt) +} + +func (db *DB) FindOneUserOverrideTx(ctx context.Context, tx *sql.Tx, id string, forUpdate bool) (*UserOverride, error) { + err := permission.LimitCheckAny(ctx, permission.User, permission.Admin) + if err != nil { + return nil, err + } + err = validate.UUID("OverrideID", id) + if err != nil { + return nil, err + } + + stmt := db.findUO + if forUpdate { + stmt = db.findUOUpdate + } + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + var o UserOverride + var add, rem, schedTgt sql.NullString + err = stmt.QueryRowContext(ctx, id).Scan(&o.ID, &add, &rem, &o.Start, &o.End, &schedTgt) + if err != nil { + return nil, err + } + o.AddUserID = add.String + o.RemoveUserID = rem.String + if schedTgt.Valid { + o.Target = assignment.ScheduleTarget(schedTgt.String) + } + + return &o, nil +} + +// UpdateUserOverrideTx updates an existing UserOverride, inside an optional transaction. 
+func (db *DB) UpdateUserOverrideTx(ctx context.Context, tx *sql.Tx, o *UserOverride) error { + err := permission.LimitCheckAny(ctx, permission.User, permission.Admin) + if err != nil { + return err + } + n, err := o.Normalize() + if err != nil { + return err + } + err = validate.UUID("ID", n.ID) + if err != nil { + return err + } + if !n.End.After(time.Now()) { + return validation.NewFieldError("End", "must be in the future") + } + var add, rem sql.NullString + if n.AddUserID != "" { + add.Valid = true + add.String = n.AddUserID + } + if n.RemoveUserID != "" { + rem.Valid = true + rem.String = n.RemoveUserID + } + var schedTgt sql.NullString + if n.Target.TargetType() == assignment.TargetTypeSchedule { + schedTgt.Valid = true + schedTgt.String = n.Target.TargetID() + } + stmt := db.updateUO + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + _, err = stmt.ExecContext(ctx, n.ID, add, rem, n.Start, n.End, schedTgt) + return err +} + +// UpdateUserOverride updates an existing UserOverride. +func (db *DB) UpdateUserOverride(ctx context.Context, o *UserOverride) error { + return db.UpdateUserOverrideTx(ctx, nil, o) +} + +// CreateUserOverrideTx adds a UserOverride to the DB with a new ID. 
+func (db *DB) CreateUserOverrideTx(ctx context.Context, tx *sql.Tx, o *UserOverride) (*UserOverride, error) { + err := permission.LimitCheckAny(ctx, permission.User, permission.Admin) + if err != nil { + return nil, err + } + n, err := o.Normalize() + if err != nil { + return nil, err + } + if !n.End.After(time.Now()) { + return nil, validation.NewFieldError("End", "must be in the future") + } + n.ID = uuid.NewV4().String() + var add, rem sql.NullString + if n.AddUserID != "" { + add.Valid = true + add.String = n.AddUserID + } + if n.RemoveUserID != "" { + rem.Valid = true + rem.String = n.RemoveUserID + } + var schedTgt sql.NullString + if n.Target.TargetType() == assignment.TargetTypeSchedule { + schedTgt.Valid = true + schedTgt.String = n.Target.TargetID() + } + _, err = wrap(db.createUO, tx).ExecContext(ctx, n.ID, add, rem, n.Start, n.End, schedTgt) + if err != nil { + return nil, err + } + + return n, nil +} + +// DeleteUserOverride removes a UserOverride from the DB matching the given ID. +func (db *DB) DeleteUserOverrideTx(ctx context.Context, tx *sql.Tx, ids ...string) error { + err := permission.LimitCheckAny(ctx, permission.User, permission.Admin) + if err != nil { + return err + } + if len(ids) == 0 { + return nil + } + err = validate.ManyUUID("UserOverrideID", ids, 50) + if err != nil { + return err + } + + _, err = wrap(db.deleteUO, tx).ExecContext(ctx, pq.StringArray(ids)) + return err +} + +// FindAllUserOverrides will return all UserOverrides that belong to the provided Target within the provided time range. 
+func (db *DB) FindAllUserOverrides(ctx context.Context, start, end time.Time, t assignment.Target) ([]UserOverride, error) { + err := permission.LimitCheckAny(ctx, permission.User, permission.Admin) + if err != nil { + return nil, err + } + err = validate.Many( + validate.OneOf("TargetType", t.TargetType(), assignment.TargetTypeSchedule), + validate.UUID("TargetID", t.TargetID()), + ) + if err != nil { + return nil, err + } + + var schedTgt sql.NullString + if t.TargetType() == assignment.TargetTypeSchedule { + schedTgt.Valid = true + schedTgt.String = t.TargetID() + } + + rows, err := db.findAllUO.QueryContext(ctx, schedTgt, start, end) + if err != nil { + return nil, err + } + defer rows.Close() + + var result []UserOverride + var o UserOverride + var add, rem sql.NullString + o.Target = t + for rows.Next() { + err = rows.Scan(&o.ID, &add, &rem, &o.Start, &o.End) + if err != nil { + return nil, err + } + // no need to check `Valid` since we're fine with the empty string + o.AddUserID = add.String + o.RemoveUserID = rem.String + result = append(result, o) + } + + return result, nil +} diff --git a/permission/checker.go b/permission/checker.go new file mode 100644 index 0000000000..0e2ef04d20 --- /dev/null +++ b/permission/checker.go @@ -0,0 +1,158 @@ +package permission + +import ( + "context" + "strings" + "sync/atomic" + + "go.opencensus.io/trace" +) + +// A Checker is used to give a pass-or-fail result for a given context.
+type Checker func(context.Context) bool + +func checkLimit(ctx context.Context) error { + n, ok := ctx.Value(contextKeyCheckCount).(*uint64) + if !ok { + return newGeneric(false, "invalid auth context for check limit") + } + max, ok := ctx.Value(contextKeyCheckCountMax).(uint64) + if !ok { + return newGeneric(false, "invalid auth context for max check limit") + } + + v := atomic.AddUint64(n, 1) // always add + if max > 0 && v > max { + return newGeneric(false, "exceeded auth check limit") + } + + return nil +} + +// LimitCheckAny will return a permission error if none of the checks pass, or +// if the auth check limit is reached. If no checks are provided, only +// the limit check, and a check that the context has SOME type authorization is +// performed. nil can be passed as an always-fail check option (useful to prevent +// the no-check behavior, if required). +func LimitCheckAny(ctx context.Context, checks ...Checker) error { + if !All(ctx) { + return newGeneric(true, "") + } + + // ensure we don't get hammered with auth checks (or DB calls, for example) + err := checkLimit(ctx) + if err != nil { + return err + } + + if len(checks) == 0 { + return nil + } + for _, c := range checks { + if c != nil && c(ctx) { + addAuthAttrs(ctx) + return nil + } + } + + return newGeneric(false, "") +} + +func addAuthAttrs(ctx context.Context) { + sp := trace.FromContext(ctx) + if sp == nil { + return + } + var attrs []trace.Attribute + if User(ctx) { + attrs = append(attrs, + sourceAttrs(ctx, + trace.StringAttribute("auth.user.id", UserID(ctx)), + trace.StringAttribute("auth.user.role", string(userRole(ctx))), + )...) + } + if System(ctx) { + attrs = append(attrs, trace.StringAttribute("auth.system.componentName", SystemComponentName(ctx))) + } + if Service(ctx) { + attrs = append(attrs, sourceAttrs(ctx, + trace.StringAttribute("auth.service.id", ServiceID(ctx)), + )...) + } + if len(attrs) == 0 { + return + } + sp.AddAttributes(attrs...) 
+} + +// All is a Checker that checks against ALL providers, returning true +// if any are found. +func All(ctx context.Context) bool { + if v, ok := ctx.Value(contextHasAuth).(int); ok && v == 1 { + return true + } + + return false +} + +// Admin is a Checker that determines if a context has the Admin role. +func Admin(ctx context.Context) bool { + r, ok := ctx.Value(contextKeyUserRole).(Role) + if ok && r == RoleAdmin { + return true + } + + return false +} + +// User is a Checker that determines if a context has the User or Admin role. +func User(ctx context.Context) bool { + r, ok := ctx.Value(contextKeyUserRole).(Role) + if ok && (r == RoleUser || r == RoleAdmin) { + return true + } + + return false +} + +// Service is a Checker that determines if a context has a serviceID. +func Service(ctx context.Context) bool { + return ServiceID(ctx) != "" +} + +// System is a Checker that determines if a context has system privileges. +func System(ctx context.Context) bool { + return SystemComponentName(ctx) != "" +} + +// Team is a Checker that determines if a context has team privileges. +func Team(ctx context.Context) bool { + return TeamID(ctx) != "" +} + +// MatchService will return a Checker that ensures the context has the given ServiceID. +func MatchService(serviceID string) Checker { + return func(ctx context.Context) bool { + if serviceID == "" { + return false + } + return ServiceID(ctx) == strings.ToLower(serviceID) + } +} + +// MatchTeam will return a Checker that ensures the context has the given TeamID. +func MatchTeam(teamID string) Checker { + return func(ctx context.Context) bool { + return TeamID(ctx) == strings.ToLower(teamID) + } +} + +// MatchUser will return a Checker that ensures the context has the given UserID. 
+func MatchUser(userID string) Checker { + return func(ctx context.Context) bool { + if userID == "" { + return false + } + return UserID(ctx) == strings.ToLower(userID) + } +} diff --git a/permission/context.go b/permission/context.go new file mode 100644 index 0000000000..fb67fc1854 --- /dev/null +++ b/permission/context.go @@ -0,0 +1,215 @@ +package permission + +import ( + "context" + "errors" + "github.com/target/goalert/util/log" + "regexp" + "strings" + "sync/atomic" + + "go.opencensus.io/trace" +) + +// SourceContext will return a context with the provided SourceInfo. +func SourceContext(ctx context.Context, src *SourceInfo) context.Context { + if src == nil { + return ctx + } + // make a copy, so it's read-only + dup := *src + ctx = log.WithField(ctx, "AuthSource", src.String()) + return context.WithValue(ctx, contextKeySourceInfo, dup) +} + +// Source will return the SourceInfo associated with a context. +func Source(ctx context.Context) *SourceInfo { + src, ok := ctx.Value(contextKeySourceInfo).(SourceInfo) + if !ok { + return nil + } + return &src +} + +func sourceAttrs(ctx context.Context, extra ...trace.Attribute) []trace.Attribute { + src := Source(ctx) + if src == nil { + return extra + } + return append([]trace.Attribute{ + trace.StringAttribute("auth.source.type", src.Type.String()), + trace.StringAttribute("auth.source.id", src.ID), + }, extra...) +} + +// UserSourceContext behaves like UserContext, but provides SourceInfo about the authorization. +func UserSourceContext(ctx context.Context, id string, r Role, src *SourceInfo) context.Context { + ctx = SourceContext(ctx, src) + ctx = UserContext(ctx, id, r) + return ctx +} + +// UserContext will return a context authenticated with the users privileges. 
+func UserContext(ctx context.Context, id string, r Role) context.Context { + id = strings.ToLower(id) + ctx = context.WithValue(ctx, contextHasAuth, 1) + ctx = context.WithValue(ctx, contextKeyUserID, id) + ctx = ensureAuthCheckCountContext(ctx) + ctx = context.WithValue(ctx, contextKeyUserRole, r) + ctx = log.WithField(ctx, "AuthUserID", id) + trace.FromContext(ctx).Annotate(sourceAttrs(ctx, + trace.StringAttribute("auth.user.id", id), + trace.StringAttribute("auth.user.role", string(r)), + ), "Authorized as User.") + return ctx +} + +var sysRx = regexp.MustCompile(`^([a-zA-Z0-9]+|Sudo\[[a-zA-Z0-9]+\])$`) + +// SystemContext will return a new context with the system privileges. +// Name must be alphanumeric. +func SystemContext(ctx context.Context, componentName string) context.Context { + if !sysRx.MatchString(componentName) { + panic(errors.New("invalid system component name: " + componentName)) + } + ctx = context.WithValue(ctx, contextHasAuth, 1) + ctx = context.WithValue(ctx, contextKeySystem, componentName) + ctx = AuthCheckCountContext(ctx, 0) + ctx = log.WithField(ctx, "AuthSystemComponent", componentName) + trace.FromContext(ctx).Annotate([]trace.Attribute{trace.StringAttribute("auth.system.componentName", componentName)}, "Authorized as System.") + return ctx +} + +// AuthCheckCount will return the current number of authorization checks as +// well as the maximum. +func AuthCheckCount(ctx context.Context) (value, max uint64) { + val, ok := ctx.Value(contextKeyCheckCount).(*uint64) + if ok { + value = atomic.LoadUint64(val) + } + + max, _ = ctx.Value(contextKeyCheckCountMax).(uint64) + + return value, max +} + +func ensureAuthCheckCountContext(ctx context.Context) context.Context { + _, ok := ctx.Value(contextKeyCheckCount).(*uint64) + if !ok { + return AuthCheckCountContext(ctx, 0) + } + return ctx +} + +// AuthCheckCountContext will return a new context with the AuthCheckCount maximum +// set to the provided value. If max is 0, there will be no limit. 
+func AuthCheckCountContext(ctx context.Context, max uint64) context.Context { + val, _ := ctx.Value(contextKeyCheckCount).(*uint64) + if val == nil { + ctx = context.WithValue(ctx, contextKeyCheckCount, new(uint64)) + } + ctx = context.WithValue(ctx, contextKeyCheckCountMax, max) + + return ctx +} + +// ServiceSourceContext behaves like ServiceContext, but provides SourceInfo about the authorization. +func ServiceSourceContext(ctx context.Context, id string, src *SourceInfo) context.Context { + ctx = SourceContext(ctx, src) + ctx = ServiceContext(ctx, id) + return ctx +} + +// ServiceContext will return a new context with privileges for the given service. +func ServiceContext(ctx context.Context, serviceID string) context.Context { + serviceID = strings.ToLower(serviceID) + ctx = context.WithValue(ctx, contextHasAuth, 1) + ctx = ensureAuthCheckCountContext(ctx) + ctx = context.WithValue(ctx, contextKeyServiceID, serviceID) + ctx = log.WithField(ctx, "AuthServiceID", serviceID) + + trace.FromContext(ctx).Annotate(sourceAttrs(ctx, + trace.StringAttribute("auth.service.id", serviceID), + ), "Authorized as Service.") + + return ctx +} + +// TeamContext will return a new context with privileges for the given team. +func TeamContext(ctx context.Context, teamID string) context.Context { + teamID = strings.ToLower(teamID) + ctx = context.WithValue(ctx, contextHasAuth, 1) + ctx = context.WithValue(ctx, contextKeyCheckCount, new(uint64)) + ctx = context.WithValue(ctx, contextKeyTeamID, teamID) + ctx = log.WithField(ctx, "AuthTeamID", teamID) + + return ctx +} + +// WithoutAuth returns a context with all auth info stripped out.
+func WithoutAuth(ctx context.Context) context.Context { + if System(ctx) { + ctx = context.WithValue(ctx, contextKeySystem, nil) + } + if id, ok := ctx.Value(contextKeyUserID).(string); ok && id != "" { + ctx = context.WithValue(ctx, contextKeyUserID, nil) + ctx = context.WithValue(ctx, contextKeyUserRole, nil) + } + if Service(ctx) { + ctx = context.WithValue(ctx, contextKeyServiceID, nil) + } + + v, _ := ctx.Value(contextHasAuth).(int) + if v == 1 { + ctx = context.WithValue(ctx, contextHasAuth, nil) + } + trace.FromContext(ctx).Annotate( + nil, + "Authorization dropped.", + ) + return ctx +} + +// SudoContext elevates an existing context to system level. The elevated context is automatically cancelled +// as soon as the callback returns. +func SudoContext(ctx context.Context, f func(context.Context)) { + name := "Sudo" + cname := SystemComponentName(ctx) + if cname != "" { + name += "[" + cname + "]" + } + sCtx, span := trace.StartSpan(ctx, "Auth.Sudo") + defer span.End() + sCtx, cancel := context.WithCancel(SystemContext(sCtx, name)) + defer cancel() + f(sCtx) +} + +func userRole(ctx context.Context) Role { + role, _ := ctx.Value(contextKeyUserRole).(Role) + return role +} + +// UserID will return the UserID associated with a context. +func UserID(ctx context.Context) string { + uid, _ := ctx.Value(contextKeyUserID).(string) + return uid +} + +// SystemComponentName will return the component name used to initiate a context. +func SystemComponentName(ctx context.Context) string { + name, _ := ctx.Value(contextKeySystem).(string) + return name +} + +// ServiceID will return the ServiceID associated with a context. +func ServiceID(ctx context.Context) string { + sid, _ := ctx.Value(contextKeyServiceID).(string) + return sid +} + +// TeamID will return the TeamID associated with a context. 
+func TeamID(ctx context.Context) string { + sid, _ := ctx.Value(contextKeyTeamID).(string) + return sid +} diff --git a/permission/context_test.go b/permission/context_test.go new file mode 100644 index 0000000000..0c98969aaf --- /dev/null +++ b/permission/context_test.go @@ -0,0 +1,76 @@ +package permission + +import ( + "context" + "testing" +) + +// ExampleSudoContext shows how to use SudoContext. +func ExampleSudoContext() { + // the original context could be from anywhere (req.Context() in an http.Handler for example) + ctx := context.Background() + SudoContext(ctx, func(ctx context.Context) { + // within this function scope, ctx now has System privileges + }) + // once the function returns, the elevated context is cancelled, but the original ctx is still valid +} + +func TestSudoContext(t *testing.T) { + SudoContext(context.Background(), func(ctx context.Context) { + if !System(ctx) { + t.Error("System(ctx) == false; want true") + } + err := LimitCheckAny(ctx, System) + if err != nil { + t.Errorf("err = %v; want nil", err) + } + err = LimitCheckAny(ctx, System, Admin, User) + if err != nil { + t.Errorf("err = %v; want nil", err) + } + }) +} + +func TestWithoutAuth(t *testing.T) { + check := func(ctx context.Context, name string) { + t.Run(name, func(t *testing.T) { + ctx = WithoutAuth(ctx) + if User(ctx) { + t.Error("User() = true; want false") + } + if Admin(ctx) { + t.Error("Admin() = true; want false") + } + if System(ctx) { + t.Error("System() = true; want false") + } + if Service(ctx) { + t.Error("Service() = true; want false") + } + if ServiceID(ctx) != "" { + t.Errorf("SeriviceID() = %s; want empty string", ServiceID(ctx)) + } + if UserID(ctx) != "" { + t.Errorf("UserID() = %s; want empty string", UserID(ctx)) + } + if SystemComponentName(ctx) != "" { + t.Errorf("SystemComponentName() = %s; want empty string", SystemComponentName(ctx)) + } + }) + } + ctx := context.Background() + data := []struct { + ctx context.Context + name string + }{ + {name: 
"user_role_user", ctx: UserContext(ctx, "bob", RoleUser)}, + {name: "user_role_unknown", ctx: UserContext(ctx, "bob", RoleUnknown)}, + {name: "user_role_admin", ctx: UserContext(ctx, "bob", RoleAdmin)}, + {name: "system", ctx: SystemContext(ctx, "test")}, + {name: "service", ctx: ServiceContext(ctx, "test")}, + } + + for _, d := range data { + check(d.ctx, d.name) + } +} diff --git a/permission/contextkey.go b/permission/contextkey.go new file mode 100644 index 0000000000..f601bb7d3a --- /dev/null +++ b/permission/contextkey.go @@ -0,0 +1,15 @@ +package permission + +type contextKey int + +const ( + contextKeyUserRole contextKey = iota + contextKeyUserID + contextKeyCheckCount + contextKeyServiceID + contextKeySystem + contextHasAuth + contextKeyTeamID + contextKeyCheckCountMax + contextKeySourceInfo +) diff --git a/permission/error.go b/permission/error.go new file mode 100644 index 0000000000..04d81aebb3 --- /dev/null +++ b/permission/error.go @@ -0,0 +1,65 @@ +package permission + +import "github.com/pkg/errors" + +// Error represents an auth error where the context does not have +// a sufficient role for the operation. +type Error interface { + error + Permission() bool // Is the error permission denied? + Unauthorized() bool +} +type genericError struct { + unauthorized bool + reason string + stack errors.StackTrace +} + +type stackTracer interface { + StackTrace() errors.StackTrace +} + +func newGeneric(unauth bool, reason string) genericError { + return genericError{ + unauthorized: unauth, + reason: reason, + stack: errors.New("").(stackTracer).StackTrace()[1:], + } +} + +// NewAccessDenied will return a new generic access denied error.
+func NewAccessDenied(reason string) error { + return newGeneric(false, reason) +} + +func (e genericError) ClientError() bool { return true } + +func (e genericError) Permission() bool { return true } +func (e genericError) Unauthorized() bool { return e.unauthorized } +func (e genericError) Error() string { + prefix := "access denied" + if e.unauthorized { + prefix = "unauthorized" + } + if e.reason == "" { + return prefix + } + + return prefix + ": " + e.reason +} + +// IsPermissionError will determine if the root error cause is a permission error. +func IsPermissionError(err error) bool { + if e, ok := errors.Cause(err).(Error); ok && e.Permission() { + return true + } + return false +} + +// IsUnauthorized will determine if the root error cause is an unauthorized permission error. +func IsUnauthorized(err error) bool { + if e, ok := errors.Cause(err).(Error); ok && e.Permission() && e.Unauthorized() { + return true + } + return false +} diff --git a/permission/permission.go b/permission/permission.go new file mode 100644 index 0000000000..76778b0293 --- /dev/null +++ b/permission/permission.go @@ -0,0 +1,12 @@ +/* + +Package permission handles checking and granting of permissions using context.Context. + +A context can be granted User, System, or Service privileges using UserContext, SystemContext, or ServiceContext, respectively. + +Data can be extracted using the appropriate method (e.g. UserID, ServiceID, etc...) + +Context can then be validated using Checkers (e.g. like the User function) or by using LimitCheckAny and a number of Checkers together. 
+ +*/ +package permission diff --git a/permission/permission_test.go b/permission/permission_test.go new file mode 100644 index 0000000000..0fb7d08ba6 --- /dev/null +++ b/permission/permission_test.go @@ -0,0 +1,20 @@ +package permission + +import ( + "context" + "fmt" +) + +func ExampleUserContext() { + // start with any context + ctx := context.Background() + + // pass it through UserContext to assign a user ID and Role + ctx = UserContext(ctx, "user-id-here", RoleAdmin) + + // later on it can be checked anywhere; this example will satisfy the Admin role requirement + err := LimitCheckAny(ctx, Admin) + + fmt.Println(err) + // output: +} diff --git a/permission/role.go b/permission/role.go new file mode 100644 index 0000000000..a3c67d2d8d --- /dev/null +++ b/permission/role.go @@ -0,0 +1,44 @@ +package permission + +import ( + "database/sql/driver" + "fmt" +) + +// Role represents a users access level +type Role string + +// Available roles +const ( + RoleUser Role = "user" + RoleAdmin Role = "admin" + RoleUnknown Role = "unknown" +) + +// Scan handles reading a Role from the DB format +func (r *Role) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *r = Role(t) + case string: + *r = Role(t) + default: + return fmt.Errorf("could not process unknown type for role %T", t) + } + + if *r != RoleAdmin && *r != RoleUser && *r != RoleUnknown { + return fmt.Errorf("unknown value for role %v", *r) + } + + return nil +} + +// Value converts the Role to the DB representation +func (r Role) Value() (driver.Value, error) { + switch r { + case RoleUser, RoleAdmin, RoleUnknown: + return string(r), nil + default: + return nil, fmt.Errorf("invalid role value: %v", r) + } +} diff --git a/permission/source.go b/permission/source.go new file mode 100644 index 0000000000..118f769e48 --- /dev/null +++ b/permission/source.go @@ -0,0 +1,44 @@ +package permission + +//go:generate go run golang.org/x/tools/cmd/stringer -type SourceType + +// SourceType 
describes a type of authentication used to authorize a context. +type SourceType int + +const ( + // SourceTypeNotificationCallback is set when a context is authenticated via the response to an outgoing notification. + SourceTypeNotificationCallback SourceType = iota + + // SourceTypeIntegrationKey is set when an integration key is used to provide permission on a context. + SourceTypeIntegrationKey + + // SourceTypeAuthProvider is set when a provider from the auth package is used (e.g. the web UI). + SourceTypeAuthProvider + + // SourceTypeContactMethod is set when a context is authorized for use of a user's contact method. + SourceTypeContactMethod + + // SourceTypeHeartbeat is set when a context is authorized for use of a service's heartbeat. + SourceTypeHeartbeat + + //SourceTypeNotificationChannel is set when a context is authorized for use of a notification channel. + SourceTypeNotificationChannel +) + +// SourceInfo provides information about the source of a context's authorization. +type SourceInfo struct { + Type SourceType + ID string +} + +func (s SourceInfo) String() string { + str := s.Type.String() + if s.ID != "" { + // using curly-braces so that it doesn't look too confusing + // if we ever run into an unknown source type + // + // unknown will show up as SourceType(n) where n is the int value. + str += "{" + s.ID + "}" + } + return str +} diff --git a/permission/sourcetype_string.go b/permission/sourcetype_string.go new file mode 100644 index 0000000000..7bac44f7b5 --- /dev/null +++ b/permission/sourcetype_string.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type SourceType"; DO NOT EDIT. + +package permission + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[SourceTypeNotificationCallback-0] + _ = x[SourceTypeIntegrationKey-1] + _ = x[SourceTypeAuthProvider-2] + _ = x[SourceTypeContactMethod-3] + _ = x[SourceTypeHeartbeat-4] + _ = x[SourceTypeNotificationChannel-5] +} + +const _SourceType_name = "SourceTypeNotificationCallbackSourceTypeIntegrationKeySourceTypeAuthProviderSourceTypeContactMethodSourceTypeHeartbeatSourceTypeNotificationChannel" + +var _SourceType_index = [...]uint8{0, 30, 54, 76, 99, 118, 147} + +func (i SourceType) String() string { + if i < 0 || i >= SourceType(len(_SourceType_index)-1) { + return "SourceType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _SourceType_name[_SourceType_index[i]:_SourceType_index[i+1]] +} diff --git a/permission/store.go b/permission/store.go new file mode 100644 index 0000000000..0fa0ccea13 --- /dev/null +++ b/permission/store.go @@ -0,0 +1 @@ +package permission diff --git a/remotemonitor/config.go b/remotemonitor/config.go new file mode 100644 index 0000000000..65304d15bf --- /dev/null +++ b/remotemonitor/config.go @@ -0,0 +1,26 @@ +package remotemonitor + +// Config contains all necessary values for remote monitoring. +type Config struct { + // Location is the unique location name of this monitor. + Location string + + // PublicURL is the publicly-routable base URL for this monitor. + // It must match what is configured for twilio SMS. + PublicURL string + + // ListenAddr is the address and port to bind to. + ListenAddr string + + // CheckMinutes denotes the number of minutes between checks (for all instances). + CheckMinutes int + + Twilio struct { + AccountSID string + AuthToken string + FromNumber string + } + + // Instances determine what remote GoAlert instances will be monitored and send potential errors. 
+ Instances []Instance +} diff --git a/remotemonitor/doc.go b/remotemonitor/doc.go new file mode 100644 index 0000000000..ac1cdfb0a1 --- /dev/null +++ b/remotemonitor/doc.go @@ -0,0 +1,18 @@ +package remotemonitor + +/* + + Remote monitor allows monitoring external functionality and communication of GoAlert instances. + + ## GoAlert Configuration + + GoAlert instances being monitored must have the following configured: + - A "main" service for heartbeats and errors + - "main" service should have a heartbeat defined for each remote monitor + - "main" service EP should immediately notify someone + - A "monitor" service for each remote monitor (used for creating test alerts) + - A "monitor" user for each remote monitor with an SMS contact method, and immediate notification rule + - "monitor" service EP steps should point to the corresponding user + - "monitor" service EP should wait at least 1 minute before escalating to someone + +*/ diff --git a/remotemonitor/instance.go b/remotemonitor/instance.go new file mode 100644 index 0000000000..a6a94c6c19 --- /dev/null +++ b/remotemonitor/instance.go @@ -0,0 +1,90 @@ +package remotemonitor + +import ( + "net/http" + "net/url" + "sync" + + "github.com/pkg/errors" + "github.com/target/goalert/util" +) + +// An Instance represents a running remote GoAlert instance to monitor. +type Instance struct { + // Location must be unique. + Location string + + // TestAPIKey is used to create test alerts. + // The service it points to should have an escalation policy that allows at least 60 seconds + // before escalating to a human. It should send initial notifications to the monitor via SMS. + TestAPIKey string + + // ErrorAPIKey is the key used to create new alerts for encountered errors. + ErrorAPIKey string + + // HeartbeatURLs are sent a POST request after a successful test cycle for this instance. + HeartbeatURLs []string + + // PublicURL should point to the publicly-routable base of the instance. 
+ PublicURL string + + // Phone is the number that incoming SMS messages from this instance will be from. + // Must be unique between all instances. + Phone string + + // ErrorsOnly, if set, will disable creating test alerts for the instance. Any error-alerts will + // still be generated, however. + ErrorsOnly bool +} + +func (i *Instance) doReq(path string, v url.Values) error { + u, err := util.JoinURL(i.PublicURL, path) + if err != nil { + return err + } + resp, err := http.PostForm(u, v) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode/100 != 2 { + return errors.Errorf("non-200 response: %s", resp.Status) + } + return nil +} + +func (i *Instance) createAlert(key, dedup, summary, details string) error { + v := make(url.Values) + v.Set("token", key) + v.Set("summary", summary) + v.Set("details", details) + v.Set("dedup", dedup) + return i.doReq("/api/v2/generic/incoming", v) +} +func (i *Instance) heartbeat() []error { + errCh := make(chan error, len(i.HeartbeatURLs)) + var wg sync.WaitGroup + for _, u := range i.HeartbeatURLs { + wg.Add(1) + go func(u string) { + defer wg.Done() + resp, err := http.Post(u, "", nil) + if err != nil { + errCh <- err + return + } + defer resp.Body.Close() + if resp.StatusCode/100 != 2 { + errCh <- errors.Errorf("non-200 response: %s", resp.Status) + } + }(u) + } + wg.Wait() + close(errCh) + var errs []error + for err := range errCh { + errs = append(errs, err) + } + + return errs +} diff --git a/remotemonitor/monitor.go b/remotemonitor/monitor.go new file mode 100644 index 0000000000..9d4205e3b0 --- /dev/null +++ b/remotemonitor/monitor.go @@ -0,0 +1,184 @@ +package remotemonitor + +import ( + "context" + "fmt" + "github.com/target/goalert/config" + "github.com/target/goalert/notification/twilio" + "io" + "log" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// Monitor will check for functionality and communication between itself and one or more instances.
+// Each monitor should have a unique phone number and location. +type Monitor struct { + appCfg config.Config + cfg Config + tw twilio.Config + shutdownCh chan struct{} + startCh chan string + finishCh chan string + pendingCh chan int + pending map[string]time.Time + srv *http.Server +} + +// NewMonitor creates and starts a new Monitor with the given Config. +func NewMonitor(cfg Config) (*Monitor, error) { + http.DefaultTransport.(*http.Transport).DisableKeepAlives = true + http.DefaultTransport = &requestIDTransport{ + RoundTripper: http.DefaultTransport, + } + u, err := url.Parse(cfg.PublicURL) + if err != nil { + return nil, err + } + m := &Monitor{cfg: cfg, + tw: twilio.Config{}, + shutdownCh: make(chan struct{}), + startCh: make(chan string), + finishCh: make(chan string), + pendingCh: make(chan int), + pending: make(map[string]time.Time), + } + l, err := net.Listen("tcp", cfg.ListenAddr) + if err != nil { + return nil, err + } + h := twilio.WrapValidation(m, m.tw) + mux := http.NewServeMux() + mux.HandleFunc("/health", func(w http.ResponseWriter, req *http.Request) { io.WriteString(w, "ok") }) + m.appCfg.General.PublicURL = cfg.PublicURL + m.appCfg.Twilio.Enable = true + m.appCfg.Twilio.AccountSID = cfg.Twilio.AccountSID + m.appCfg.Twilio.AuthToken = cfg.Twilio.AuthToken + m.appCfg.Twilio.FromNumber = cfg.Twilio.FromNumber + mux.Handle("/", twilio.WrapHeaderHack(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + req.URL.Path = strings.TrimPrefix(req.URL.Path, u.Path) + + h.ServeHTTP(w, req) + }))) + m.srv = &http.Server{ + Handler: config.Handler(mux, config.Static(m.appCfg)), + IdleTimeout: 15 * time.Second, + ReadHeaderTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + WriteTimeout: 15 * time.Second, + MaxHeaderBytes: 1024 * 1024, + } + + m.srv.SetKeepAlivesEnabled(false) + + log.Println("Listening:", l.Addr()) + + go m.serve(l) + go m.loop() + go m.waitLoop() + + return m, nil +} +func (m *Monitor) serve(l net.Listener) { + err 
:= m.srv.Serve(l) + if err != nil && err != http.ErrServerClosed { + log.Fatalln("ERROR:", err) + } +} + +func (m *Monitor) reportErr(i Instance, err error, action string) { + if err == nil { + return + } + summary := fmt.Sprintf("Remote Monitor in %s failed to %s in %s", m.cfg.Location, action, i.Location) + details := fmt.Sprintf("Monitor Location: %s\nInstance Location: %s\nAction: %s\nError: %s", m.cfg.Location, i.Location, action, err.Error()) + for _, ins := range m.cfg.Instances { + if ins.ErrorAPIKey == "" { + log.Println("No ErrorAPIKey for", ins.Location) + continue + } + go ins.createAlert(ins.ErrorAPIKey, "", summary, details) + } + log.Println("ERROR:", summary) +} +func (m *Monitor) waitLoop() { + t := time.NewTicker(100 * time.Millisecond) + for { + select { + case <-t.C: + for k, v := range m.pending { + if time.Since(v) > time.Minute { + delete(m.pending, k) + } + } + case name := <-m.startCh: + m.pending[name] = time.Now() + case name := <-m.finishCh: + delete(m.pending, name) + } + + select { + case m.pendingCh <- len(m.pending): + default: + } + } +} +func (m *Monitor) loop() { + delay := time.Duration(m.cfg.CheckMinutes) * time.Minute + t := time.NewTicker(delay) + + dedup := fmt.Sprintf("RemoteMonitor:Check:%s", m.cfg.Location) + summary := fmt.Sprintf("Remote Monitor Communication Test from %s", m.cfg.Location) + details := fmt.Sprintf(`This alert was generated by a GoAlert Remote Monitor running in %s. + +These alerts are generated periodically to monitor actual system functionality and communication. + +If it is not automatically closed within a minute, there may be a problem with SMS or network connectivity. 
+`, m.cfg.Location) + + doCheck := func() { + for _, i := range m.cfg.Instances { + if i.ErrorsOnly { + continue + } + m.startCh <- i.Location + go func(i Instance) { + err := i.createAlert(i.TestAPIKey, dedup, summary, details) + if err != nil { + m.reportErr(i, err, "create new alert") + } + }(i) + } + } + doCheck() + for { + select { + case <-m.shutdownCh: + return + case <-t.C: + doCheck() + } + } +} + +// context will return a new background context with config applied. +func (m *Monitor) context() context.Context { + return m.appCfg.Context(context.Background()) +} + +// Shutdown gracefully shuts down the monitor, waiting for any in-flight checks to complete. +func (m *Monitor) Shutdown(ctx context.Context) error { + log.Println("Beginning shutdown...") + close(m.shutdownCh) + for n := range m.pendingCh { + if n == 0 { + // wait for all pending operations to finish or timeout + break + } + } + + return m.srv.Shutdown(ctx) +} diff --git a/remotemonitor/requestid.go b/remotemonitor/requestid.go new file mode 100644 index 0000000000..a457be2cf3 --- /dev/null +++ b/remotemonitor/requestid.go @@ -0,0 +1,21 @@ +package remotemonitor + +import ( + "log" + "net/http" + + uuid "github.com/satori/go.uuid" +) + +type requestIDTransport struct { + http.RoundTripper +} + +func (r *requestIDTransport) RoundTrip(req *http.Request) (*http.Response, error) { + q := req.URL.Query() + q.Set("x-request-id", uuid.NewV4().String()) + req.URL.RawQuery = q.Encode() + + log.Println(req.Method, req.URL.String()) + return r.RoundTripper.RoundTrip(req) +} diff --git a/remotemonitor/sms.go b/remotemonitor/sms.go new file mode 100644 index 0000000000..bdd4ba5c3d --- /dev/null +++ b/remotemonitor/sms.go @@ -0,0 +1,80 @@ +package remotemonitor + +import ( + "log" + "net/http" + "regexp" + "strconv" + "strings" +) + +var numRx = regexp.MustCompile(`^\+\d{1,15}$`) + +func validPhone(n string) string { + if !numRx.MatchString(n) { + return "" + } + + return n +} +func (m *Monitor) 
ServeHTTP(w http.ResponseWriter, req *http.Request) { + if req.URL.Path == "/v1/twilio/sms/status" || req.URL.Path == "/api/v2/twilio/message/status" { + // ignore status notifications + return + } + from := validPhone(req.FormValue("From")) + to := validPhone(req.FormValue("To")) + if from == "" || to == "" { + http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + return + } + if from == m.cfg.Twilio.FromNumber || to != m.cfg.Twilio.FromNumber { + // status or something we don't care about + return + } + + body := req.FormValue("Body") + go m.processSMS(from, body) +} +func (m *Monitor) sendSMS(to, body string) { + msg, err := m.tw.SendSMS(m.context(), to, body, nil) + if err != nil { + log.Printf("ERROR: sending '%s' to %s: %s\n", strconv.Quote(body), to, err.Error()) + return + } + log.Printf("SENT SMS: %s -> %s %s; SID=%s; Status=%s\n", msg.From, msg.To, strconv.Quote(body), msg.SID, msg.Status) +} + +var actionRx = regexp.MustCompile(`'(\d+c)'`) + +func (m *Monitor) processSMS(from, body string) { + log.Println("INCOMING SMS:", from, strconv.Quote(body)) + var i Instance + var found bool + for _, search := range m.cfg.Instances { + if search.Phone == from { + i = search + found = true + break + } + } + if !found { + log.Println("ERROR: unknown SMS source:", from, strconv.Quote(body)) + return + } + + if strings.Contains(strings.ToLower(body), "closed") { + for _, err := range i.heartbeat() { + m.reportErr(i, err, "post to heartbeat endpoint") + } + m.finishCh <- i.Location + return + } + + if p := actionRx.FindStringSubmatch(body); len(p) == 2 { + m.sendSMS(from, p[1]) + return + } + + log.Println("ERROR: unrecognized SMS message:", from, body) +} diff --git a/retry/do.go b/retry/do.go new file mode 100644 index 0000000000..acfa2bc5bb --- /dev/null +++ b/retry/do.go @@ -0,0 +1,81 @@ +package retry + +import ( + "context" + "github.com/target/goalert/util/log" + "math/rand" + "time" + + "github.com/pkg/errors" +) + +var _fib = 
[]int{0, 1} + +func fib(n int) int { + for i := len(_fib) - 1; i < n; i++ { + _fib = append(_fib, _fib[i-1]+_fib[i]) + } + return _fib[n] +} +func init() { + fib(30) +} + +// DoFunc is a function that can be retried. It is passed the current attempt number (starting with 0) +// and should return true if a retry should be attempted. +type DoFunc func(int) (bool, error) + +// An Option takes the attempt number and the last error value (can be nil) and should indicate +// if a retry should be made. +type Option func(int, error) bool + +// Do will retry the given DoFunc until it or an option returns false. The last returned +// error value (can be nil) of fn will be returned. +func Do(fn DoFunc, opts ...Option) error { + var n int + var err error + var retry bool + var opt Option + for { + for _, opt = range opts { + if !opt(n, err) { + return err + } + } + retry, err = fn(n) + if !retry { + return err + } + n++ + } +} + +// Log will log all errors between retries returned from the DoFunc. The final error, if any, is not logged. +func Log(ctx context.Context) Option { + return func(a int, err error) bool { + if a == 0 || err == nil { + return true + } + log.Log(log.WithField(ctx, "RetryAttempt", a-1), errors.Wrap(err, "will retry")) + return true + } +} + +// Limit will set the max number of retry attempts (including the initial attempt). +func Limit(n int) Option { + return func(a int, _ error) bool { + return a < n + } +} + +// FibBackoff will Sleep for f(n) * Duration (+/- 50ms) before each attempt, where f(n) is the value from the Fibonacci sequence for +// the nth attempt. There is no delay for the first attempt (n=0). 
+func FibBackoff(d time.Duration) Option { + return func(a int, _ error) bool { + if a == 0 { + return true + } + time.Sleep(time.Duration(fib(a))*d + time.Duration(rand.Intn(100)-50)*time.Millisecond) + return true + } +} diff --git a/retry/temporary.go b/retry/temporary.go new file mode 100644 index 0000000000..22611e273a --- /dev/null +++ b/retry/temporary.go @@ -0,0 +1,59 @@ +package retry + +import ( + "database/sql" + "database/sql/driver" + "net" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +type clientErr interface { + ClientError() bool +} + +// IsTemporaryError will determine if an error is temporary, and thus +// the action can/should be retried. +func IsTemporaryError(err error) bool { + if err == nil { + return false + } + if e, ok := err.(clientErr); ok && e.ClientError() { + return false + } + cause := errors.Cause(err) + if _, ok := cause.(net.Error); ok { + return true + } + if cause == sql.ErrConnDone { + return true + } + if cause == driver.ErrBadConn { + return true + } + if pqe, ok := cause.(*pq.Error); ok { + switch pqe.Code.Class() { + // Allow retry for tx or connection errors: + // - Class 40 — Transaction Rollback + // - Class 08 — Connection Exception + // + // https://www.postgresql.org/docs/10/static/errcodes-appendix.html + case "40", "08": + return true + } + } + return false +} + +// DoTempFunc is a simplified version of DoFunc that just returns an error value. +type DoTempFunc func(int) error + +// DoTemporaryError will retry as long as the error returned from fn is +// temporary as defined by IsTemporaryError. +func DoTemporaryError(fn DoTempFunc, opts ...Option) error { + return Do(func(n int) (bool, error) { + err := fn(n) + return IsTemporaryError(err), err + }, opts...) 
+} diff --git a/schedule/rotation/participant.go b/schedule/rotation/participant.go new file mode 100644 index 0000000000..5b5aef57c0 --- /dev/null +++ b/schedule/rotation/participant.go @@ -0,0 +1,29 @@ +package rotation + +import ( + "github.com/target/goalert/assignment" + "github.com/target/goalert/validation/validate" +) + +type Participant struct { + ID string `json:"id"` + Position int `json:"position"` + RotationID string `json:"rotation_id"` + Target assignment.Target +} + +func (p Participant) Normalize() (*Participant, error) { + err := validate.Many( + validate.UUID("RotationID", p.RotationID), + validate.UUID("TargetID", p.Target.TargetID()), + validate.OneOf("TargetType", p.Target.TargetType(), + assignment.TargetTypeUser, + ), + validate.Range("Position", p.Position, 0, 9000), + ) + + if err != nil { + return nil, err + } + return &p, nil +} diff --git a/schedule/rotation/rotation.go b/schedule/rotation/rotation.go new file mode 100644 index 0000000000..bb3a0cb328 --- /dev/null +++ b/schedule/rotation/rotation.go @@ -0,0 +1,114 @@ +package rotation + +import ( + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "time" +) + +type Rotation struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + + Type Type `json:"type"` + Start time.Time `json:"start"` + ShiftLength int `json:"shift_length"` +} + +func addHours(t time.Time, n int) time.Time { + return time.Date(t.Year(), t.Month(), t.Day(), t.Hour()+n, t.Minute(), t.Second(), t.Nanosecond(), t.Location()) +} + +func addHoursAlwaysInc(t time.Time, n int) time.Time { + res := addHours(t, n) + if n < 0 { + for !res.Before(t) { + n-- + res = addHours(t, n) + } + } else { + for !res.After(t) { + n++ + res = addHours(t, n) + } + } + + return res +} + +// StartTime calculates the start of the "shift" that started at (or was active) at t. 
+// For daily and weekly rotations, start time will be the previous handoff time (from start). +func (r Rotation) StartTime(t time.Time) time.Time { + if r.ShiftLength <= 0 { + r.ShiftLength = 1 + } + end := r.EndTime(t) + + switch r.Type { + case TypeHourly: + return addHoursAlwaysInc(end, -r.ShiftLength) + case TypeWeekly: + r.ShiftLength *= 7 + case TypeDaily: + default: + panic("unexpected rotation type") + } + + return end.AddDate(0, 0, -r.ShiftLength) +} + +// EndTime calculates the end of the "shift" that started at (or was active) at t. +// +// For daily and weekly rotations, end time will be the next handoff time (from start). +func (r Rotation) EndTime(t time.Time) time.Time { + if r.ShiftLength <= 0 { + r.ShiftLength = 1 + } + t = t.Truncate(time.Minute) + cTime := r.Start.Truncate(time.Minute) + + switch r.Type { + case TypeHourly: + // while cTime (rotation start) is before t + for !cTime.After(t) { + cTime = addHoursAlwaysInc(cTime, r.ShiftLength) + } + case TypeWeekly: + r.ShiftLength *= 7 + fallthrough + case TypeDaily: + // while cTime (rotation start) is before t + for !cTime.After(t) { + // getting end of shift + cTime = cTime.AddDate(0, 0, r.ShiftLength) + } + default: + panic("unexpected rotation type") + } + + return cTime +} + +func (r Rotation) Normalize() (*Rotation, error) { + if r.ShiftLength == 0 { + // default to 1 + r.ShiftLength = 1 + } + r.Start = r.Start.Truncate(time.Minute) + + if r.Start.Location() == nil { + return nil, validation.NewFieldError("TimeZone", "must be specified") + } + err := validate.Many( + validate.IDName("Name", r.Name), + validate.Range("ShiftLength", r.ShiftLength, 1, 9000), + validate.OneOf("Type", r.Type, TypeWeekly, TypeDaily, TypeHourly), + validate.Text("Description", r.Description, 1, 255), + ) + if err != nil { + return nil, err + } + + return &r, nil +} diff --git a/schedule/rotation/rotation_test.go b/schedule/rotation/rotation_test.go new file mode 100644 index 0000000000..3ef1483e43 --- /dev/null 
+++ b/schedule/rotation/rotation_test.go @@ -0,0 +1,249 @@ +package rotation + +import ( + "testing" + "time" +) + +const timeFmt = "Jan 2 2006 3:04 pm" + +func mustParse(t *testing.T, value string) time.Time { + t.Helper() + loc, err := time.LoadLocation("America/Chicago") + if err != nil { + t.Fatal(err) + } + tm, err := time.ParseInLocation(timeFmt, value, loc) + if err != nil { + t.Fatal(err) + } + tm = tm.In(loc) + return tm +} + +func TestRotation_EndTime_DST(t *testing.T) { + tFmt := timeFmt + " (-07:00)" + rot := &Rotation{ + Type: TypeHourly, + Start: mustParse(t, "Jan 1 2017 1:00 am"), + } + t.Logf("Rotation Start=%s", rot.Start.Format(tFmt)) + + test := func(start, end time.Time) { + t.Run("", func(t *testing.T) { + t.Logf("Shift Start=%s", start.Format(tFmt)) + e := rot.EndTime(start) + if !e.Equal(end) { + t.Errorf("got '%s' want '%s'", e.Format(tFmt), end.Format(tFmt)) + } + }) + } + + start := rot.Start.AddDate(0, 2, 11) // mar 11 1:00am + end := start.Add(time.Hour) // same time (we skip a shift) + test(start, end) + + start = rot.Start.AddDate(0, 10, 4) // nov 5 1:00am + end = start.Add(time.Hour * 2) // 2 hours after + test(start, end) +} + +func TestRotation_EndTime_ConfigChange(t *testing.T) { + rot := &Rotation{ + Type: TypeHourly, + Start: mustParse(t, "Jan 1 2017 12:00 am"), + ShiftLength: 12, + } + + start := mustParse(t, "Jan 3 2017 6:00 am") + result := rot.EndTime(start) + + expected := mustParse(t, "Jan 3 2017 12:00 pm") + if !result.Equal(expected) { + t.Errorf("EndTime=%s; want %s", result.Format(timeFmt), expected.Format(timeFmt)) + } +} + +func TestRotation_EndTime(t *testing.T) { + test := func(start, end string, len int, dur time.Duration, typ Type) { + t.Run(string(typ), func(t *testing.T) { + s := mustParse(t, start) + e := mustParse(t, end) + dur = dur.Round(time.Second) + if e.Sub(s).Round(time.Second) != dur { + t.Fatalf("bad test data: end-start=%s; want %s", e.Sub(s).Round(time.Second).String(), dur.String()) + } + rot := 
&Rotation{ + Type: typ, + ShiftLength: len, + Start: s, + } + + result := rot.EndTime(s) + if !result.Equal(e) { + t.Errorf("got '%s'; want '%s'", result.Format(timeFmt), end) + } + if result.Sub(s).Round(time.Second) != dur { + t.Errorf("duration was '%s'; want '%s'", result.Sub(s).Round(time.Second).String(), dur.String()) + } + }) + } + + type dat struct { + s string + l int + exp string + dur time.Duration + } + + // weekly + data := []dat{ + {s: "Jun 10 2017 8:00 am", l: 1, exp: "Jun 17 2017 8:00 am", dur: time.Hour * 24 * 7}, + {s: "Jun 10 2017 8:00 am", l: 2, exp: "Jun 24 2017 8:00 am", dur: time.Hour * 24 * 7 * 2}, + + // DST tests + {s: "Mar 10 2017 8:00 am", l: 1, exp: "Mar 17 2017 8:00 am", dur: time.Hour*24*7 - time.Hour}, + {s: "Nov 4 2017 8:00 am", l: 1, exp: "Nov 11 2017 8:00 am", dur: time.Hour*24*7 + time.Hour}, + } + for _, d := range data { + test(d.s, d.exp, d.l, d.dur, TypeWeekly) + } + + // daily + data = []dat{ + {s: "Jun 10 2017 8:00 am", l: 1, exp: "Jun 11 2017 8:00 am", dur: time.Hour * 24}, + {s: "Jun 10 2017 8:00 am", l: 2, exp: "Jun 12 2017 8:00 am", dur: time.Hour * 24 * 2}, + + // DST tests + {s: "Mar 11 2017 8:00 am", l: 1, exp: "Mar 12 2017 8:00 am", dur: time.Hour * 23}, + {s: "Nov 4 2017 8:00 am", l: 1, exp: "Nov 5 2017 8:00 am", dur: time.Hour * 25}, + } + for _, d := range data { + test(d.s, d.exp, d.l, d.dur, TypeDaily) + } + + // hourly + data = []dat{ + {s: "Jun 10 2017 8:00 am", l: 1, exp: "Jun 10 2017 9:00 am", dur: time.Hour}, + {s: "Jun 10 2017 8:00 am", l: 2, exp: "Jun 10 2017 10:00 am", dur: time.Hour * 2}, + + // DST tests + {s: "Mar 12 2017 12:00 am", l: 3, exp: "Mar 12 2017 3:00 am", dur: time.Hour * 2}, + {s: "Nov 5 2017 12:00 am", l: 3, exp: "Nov 5 2017 3:00 am", dur: time.Hour * 4}, + } + for _, d := range data { + test(d.s, d.exp, d.l, d.dur, TypeHourly) + } +} + +func TestRotation_Normalize(t *testing.T) { + + test := func(valid bool, r Rotation) { + name := "valid" + if !valid { + name = "invalid" + } + 
t.Run(name, func(t *testing.T) { + _, err := r.Normalize() + if valid && err != nil { + t.Errorf("err = %v; want nil", err) + } else if !valid && err == nil { + t.Errorf("got nil err; want non-nil") + } + }) + } + + valid := []Rotation{ + {Name: "Default", ShiftLength: 1, Type: TypeWeekly, Description: "Default Rotation"}, + } + invalid := []Rotation{ + {Name: "D", ShiftLength: -100, Type: TypeWeekly, Description: "Default Rotation"}, + } + for _, r := range valid { + test(true, r) + } + for _, r := range invalid { + test(false, r) + } +} + +func TestRotation_StartTime(t *testing.T) { + test := func(start, end string, len int, dur time.Duration, typ Type) { + t.Run(string(typ), func(t *testing.T) { + s := mustParse(t, start) + e := mustParse(t, end) + dur = dur.Round(time.Second) + if e.Sub(s).Round(time.Second) != dur { + t.Fatalf("bad test data: end-start=%s; want %s", e.Sub(s).Round(time.Second).String(), dur.String()) + } + rot := &Rotation{ + Type: typ, + ShiftLength: len, + Start: s, + } + + result := rot.StartTime(s) + if !result.Equal(s) { + t.Errorf("got '%s'; want '%s'", result.Format(timeFmt), start) + } + + if result.Sub(e).Round(time.Second) != -(dur) { + t.Errorf("duration was '%s'; want '%s'", result.Sub(e).Round(time.Second).String(), dur.String()) + } + }) + } + + type dat struct { + s string + l int + exp string + dur time.Duration + } + + // weekly + data := []dat{ + {s: "Jun 10 2017 8:00 am", l: 1, exp: "Jun 17 2017 8:00 am", dur: time.Hour * 24 * 7}, + {s: "Jun 10 2017 8:00 am", l: 2, exp: "Jun 24 2017 8:00 am", dur: time.Hour * 24 * 7 * 2}, + + // DST tests + {s: "Mar 10 2017 8:00 am", l: 1, exp: "Mar 17 2017 8:00 am", dur: time.Hour*24*7 - time.Hour}, + {s: "Nov 4 2017 8:00 am", l: 1, exp: "Nov 11 2017 8:00 am", dur: time.Hour*24*7 + time.Hour}, + } + for _, d := range data { + test(d.s, d.exp, d.l, d.dur, TypeWeekly) + } + + // weekly but with different start timestamp for comparison + data = []dat{ + {s: "Jun 10 2017 8:00 am", l: 1, exp: 
"Jun 17 2017 8:00 am", dur: time.Hour * 24 * 7}, + } + for _, d := range data { + test("Jun 16 2017 8:00 am", d.exp, d.l, time.Hour*24, TypeWeekly) + } + + // daily + data = []dat{ + {s: "Jun 10 2017 8:00 am", l: 1, exp: "Jun 11 2017 8:00 am", dur: time.Hour * 24}, + {s: "Jun 10 2017 8:00 am", l: 2, exp: "Jun 12 2017 8:00 am", dur: time.Hour * 24 * 2}, + + // DST tests + {s: "Mar 11 2017 8:00 am", l: 1, exp: "Mar 12 2017 8:00 am", dur: time.Hour * 23}, + {s: "Nov 4 2017 8:00 am", l: 1, exp: "Nov 5 2017 8:00 am", dur: time.Hour * 25}, + } + for _, d := range data { + test(d.s, d.exp, d.l, d.dur, TypeDaily) + } + + // hourly + data = []dat{ + {s: "Jun 10 2017 8:00 am", l: 1, exp: "Jun 10 2017 9:00 am", dur: time.Hour}, + {s: "Jun 10 2017 8:00 am", l: 2, exp: "Jun 10 2017 10:00 am", dur: time.Hour * 2}, + + // DST tests + {s: "Mar 12 2017 12:00 am", l: 3, exp: "Mar 12 2017 3:00 am", dur: time.Hour * 2}, + {s: "Nov 5 2017 12:00 am", l: 3, exp: "Nov 5 2017 3:00 am", dur: time.Hour * 4}, + } + for _, d := range data { + test(d.s, d.exp, d.l, d.dur, TypeHourly) + } +} diff --git a/schedule/rotation/search.go b/schedule/rotation/search.go new file mode 100644 index 0000000000..1368aae12a --- /dev/null +++ b/schedule/rotation/search.go @@ -0,0 +1,128 @@ +package rotation + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + "text/template" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// SearchOptions allow filtering and paginating the list of rotations. +type SearchOptions struct { + Search string `json:"s,omitempty"` + After SearchCursor `json:"a,omitempty"` + + // Omit specifies a list of rotation IDs to exclude from the results + Omit []string `json:"o,omitempty"` + + Limit int `json:"-"` +} + +// SearchCursor is used to indicate a position in a paginated list. 
+type SearchCursor struct { + Name string `json:"n,omitempty"` +} + +var searchTemplate = template.Must(template.New("search").Parse(` + SELECT + id, name, description, type, start_time, shift_length, time_zone + FROM rotations rot + WHERE true + {{if .Omit}} + AND not id = any(:omit) + {{end}} + {{if .SearchStr}} + AND (rot.name ILIKE :search OR rot.description ILIKE :search) + {{end}} + {{if .After.Name}} + AND lower(rot.name) > lower(:afterName) + {{end}} + ORDER BY lower(rot.name) + LIMIT {{.Limit}} +`)) + +type renderData SearchOptions + +func (opts renderData) SearchStr() string { + if opts.Search == "" { + return "" + } + + return "%" + search.Escape(opts.Search) + "%" +} + +func (opts renderData) Normalize() (*renderData, error) { + if opts.Limit == 0 { + opts.Limit = search.DefaultMaxResults + } + + err := validate.Many( + validate.Text("Search", opts.Search, 0, search.MaxQueryLen), + validate.Range("Limit", opts.Limit, 0, search.MaxResults), + validate.ManyUUID("Omit", opts.Omit, 50), + ) + if opts.After.Name != "" { + err = validate.Many(err, validate.IDName("After.Name", opts.After.Name)) + } + + return &opts, err +} + +func (opts renderData) QueryArgs() []sql.NamedArg { + return []sql.NamedArg{ + sql.Named("search", opts.SearchStr()), + sql.Named("afterName", opts.After.Name), + sql.Named("omit", pq.StringArray(opts.Omit)), + } +} + +func (db *DB) Search(ctx context.Context, opts *SearchOptions) ([]Rotation, error) { + err := permission.LimitCheckAny(ctx, permission.User) + if err != nil { + return nil, err + } + if opts == nil { + opts = &SearchOptions{} + } + data, err := (*renderData)(opts).Normalize() + if err != nil { + return nil, err + } + query, args, err := search.RenderQuery(ctx, searchTemplate, data) + if err != nil { + return nil, errors.Wrap(err, "render query") + } + + rows, err := db.db.QueryContext(ctx, query, args...) 
+ if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Rotation + var r Rotation + var tz string + for rows.Next() { + err = rows.Scan(&r.ID, &r.Name, &r.Description, &r.Type, &r.Start, &r.ShiftLength, &tz) + if err != nil { + return nil, err + } + loc, err := util.LoadLocation(tz) + if err != nil { + return nil, err + } + r.Start = r.Start.In(loc) + result = append(result, r) + } + + return result, nil +} diff --git a/schedule/rotation/state.go b/schedule/rotation/state.go new file mode 100644 index 0000000000..9a3d300a14 --- /dev/null +++ b/schedule/rotation/state.go @@ -0,0 +1,24 @@ +package rotation + +import ( + "github.com/target/goalert/validation/validate" + "time" +) + +type State struct { + RotationID string + ParticipantID string + Position int + ShiftStart time.Time +} + +func (s State) Normalize() (*State, error) { + err := validate.Many( + validate.UUID("ParticipantID", s.ParticipantID), + validate.Range("Position", s.Position, 0, 9000), + ) + if err != nil { + return nil, err + } + return &s, nil +} diff --git a/schedule/rotation/store.go b/schedule/rotation/store.go new file mode 100644 index 0000000000..a1179f81d2 --- /dev/null +++ b/schedule/rotation/store.go @@ -0,0 +1,904 @@ +package rotation + +import ( + "context" + "database/sql" + "github.com/target/goalert/assignment" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "sort" + + "github.com/lib/pq" + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" +) + +// ErrNoState is returned when there is no state information available for a rotation. 
+var ErrNoState = errors.New("no state available") + +type Store interface { + ReadStore + ReadStateStore + CreateRotation(context.Context, *Rotation) (*Rotation, error) + CreateRotationTx(context.Context, *sql.Tx, *Rotation) (*Rotation, error) + UpdateRotation(context.Context, *Rotation) error + UpdateRotationTx(context.Context, *sql.Tx, *Rotation) error + DeleteRotation(context.Context, string) error + DeleteRotationTx(context.Context, *sql.Tx, string) error + DeleteManyTx(context.Context, *sql.Tx, []string) error + + AddParticipant(context.Context, *Participant) (*Participant, error) + AddParticipantTx(context.Context, *sql.Tx, *Participant) (*Participant, error) + RemoveParticipant(context.Context, string) (rotationID string, err error) + RemoveParticipantTx(context.Context, *sql.Tx, string) (rotationID string, err error) + MoveParticipant(context.Context, string, int) error + IsParticipantActive(context.Context, string) (bool, error) + SetActiveParticipant(ctx context.Context, rotationID, participantID string) error + SetActiveIndexTx(ctx context.Context, tx *sql.Tx, rotID string, position int) error + FindMany(context.Context, []string) ([]Rotation, error) + Search(context.Context, *SearchOptions) ([]Rotation, error) + + AddRotationUsersTx(ctx context.Context, tx *sql.Tx, rotationID string, userIDs []string) error + DeleteRotationParticipantsTx(ctx context.Context, tx *sql.Tx, partIDs []string) error + UpdateParticipantUserIDTx(ctx context.Context, tx *sql.Tx, partID, userID string) error + DeleteStateTx(ctx context.Context, tx *sql.Tx, rotationID string) error +} +type StateStore interface { + ReadStore + StateReader + ParticipantReader +} +type StateReader interface { + State(context.Context, string) (*State, error) + StateTx(context.Context, *sql.Tx, string) (*State, error) + FindAllStateByScheduleID(context.Context, string) ([]State, error) +} + +type ReadStateStore interface { + StateReader + ParticipantReader +} +type ParticipantReader interface { + 
FindParticipant(ctx context.Context, id string) (*Participant, error) + FindAllParticipants(ctx context.Context, rotationID string) ([]Participant, error) + FindAllParticipantsTx(ctx context.Context, tx *sql.Tx, rotationID string) ([]Participant, error) + FindAllParticipantsByScheduleID(ctx context.Context, scheduleID string) ([]Participant, error) +} +type ReadStore interface { + FindRotation(context.Context, string) (*Rotation, error) + FindRotationForUpdateTx(context.Context, *sql.Tx, string) (*Rotation, error) + FindAllRotations(context.Context) ([]Rotation, error) + FindAllRotationsByScheduleID(context.Context, string) ([]Rotation, error) + FindParticipantCount(context.Context, string) (int, error) +} + +var _ = Store(&DB{}) + +type DB struct { + db *sql.DB + + createRotation *sql.Stmt + updateRotation *sql.Stmt + findAllRotations *sql.Stmt + findRotation *sql.Stmt + findRotationForUpdate *sql.Stmt + deleteRotation *sql.Stmt + findMany *sql.Stmt + + findAllBySched *sql.Stmt + findAllParticipantsBySched *sql.Stmt + findAllStateBySched *sql.Stmt + + findAllParticipants *sql.Stmt + addParticipant *sql.Stmt + deleteParticipant *sql.Stmt + moveParticipant *sql.Stmt + setActiveParticipant *sql.Stmt + findParticipant *sql.Stmt + participantActive *sql.Stmt + findPartPos *sql.Stmt + + state *sql.Stmt + rmState *sql.Stmt + partRotID *sql.Stmt + + addParticipants *sql.Stmt + deleteParticipants *sql.Stmt + updateParticipantUserID *sql.Stmt + setActiveIndex *sql.Stmt + + findPartCount *sql.Stmt +} + +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + db: db, + + createRotation: p.P(`INSERT INTO rotations (id, name, description, type, start_time, shift_length, time_zone) VALUES ($1, $2, $3, $4, $5, $6, $7)`), + updateRotation: p.P(`UPDATE rotations SET name = $2, description = $3, type = $4, start_time = $5, shift_length = $6, time_zone = $7 WHERE id = $1`), + findAllRotations: p.P(`SELECT id, name, 
description, type, start_time, shift_length, time_zone FROM rotations`), + findRotation: p.P(`SELECT id, name, description, type, start_time, shift_length, time_zone FROM rotations WHERE id = $1`), + findRotationForUpdate: p.P(`SELECT id, name, description, type, start_time, shift_length, time_zone FROM rotations WHERE id = $1 FOR UPDATE`), + deleteRotation: p.P(`DELETE FROM rotations WHERE id = ANY($1)`), + + findMany: p.P(`SELECT id, name, description, type, start_time, shift_length, time_zone FROM rotations WHERE id = any($1)`), + + partRotID: p.P(`SELECT rotation_id FROM rotation_participants WHERE id = $1`), + + findAllBySched: p.P(` + SELECT id, name, description, type, start_time, shift_length, time_zone + FROM rotations + WHERE id IN ( + SELECT DISTINCT tgt_rotation_id + FROM schedule_rules + WHERE schedule_id = $1 + ) + `), + findAllParticipantsBySched: p.P(` + SELECT id, rotation_id, position, user_id + FROM rotation_participants + WHERE rotation_id IN ( + SELECT DISTINCT tgt_rotation_id + FROM schedule_rules + WHERE schedule_id = $1 + ) + `), + findAllStateBySched: p.P(` + SELECT + rotation_id, + position, + rotation_participant_id, + shift_start + FROM rotation_state + WHERE rotation_id IN ( + SELECT DISTINCT tgt_rotation_id + FROM schedule_rules + WHERE schedule_id = $1 + ) + `), + + addParticipant: p.P(` + INSERT INTO rotation_participants (id, rotation_id, position, user_id) + VALUES ( + $1, + $2, + 0, + $3 + ) + RETURNING position + `), + deleteParticipant: p.P(`DELETE FROM rotation_participants WHERE id = $1 RETURNING rotation_id`), + moveParticipant: p.P(` + WITH calc AS ( + SELECT + rotation_id rot_id, + position old_pos, + LEAST(position, $2) min, + GREATEST(position, $2) max, + ($2 - position) diff, + CASE + WHEN position < $2 THEN abs($2-position) + WHEN position > $2 THEN 1 + ELSE 0 + END shift + FROM rotation_participants + WHERE id = $1 + FOR UPDATE + ) + UPDATE rotation_participants + SET position = ((position - calc.min) + calc.shift) % 
(abs(calc.diff) + 1) + calc.min + FROM calc + WHERE + rotation_id = calc.rot_id AND + position >= calc.min AND + position <= calc.max + RETURNING rotation_id + `), + setActiveParticipant: p.P(` + UPDATE rotation_state + SET rotation_participant_id = $2 + WHERE rotation_id = $1 + `), + + findPartPos: p.P(`SELECT position, rotation_id FROM rotation_participants WHERE id = $1`), + findAllParticipants: p.P(`SELECT id, rotation_id, position, user_id FROM rotation_participants WHERE rotation_id = $1 ORDER BY position`), + + findParticipant: p.P(`SELECT rotation_id, position, user_id FROM rotation_participants WHERE id = $1`), + participantActive: p.P(`SELECT 1 FROM rotation_state WHERE rotation_participant_id = $1 LIMIT 1`), + state: p.P(` + SELECT + position, + rotation_participant_id, + shift_start + FROM rotation_state + WHERE rotation_id = $1 + `), + rmState: p.P(` + DELETE FROM rotation_state WHERE rotation_id = $1 + `), + + addParticipants: p.P(` + INSERT INTO rotation_participants (rotation_id, user_id) SELECT $1, unnest FROM unnest($2::UUID[]) + `), + + deleteParticipants: p.P(` + DELETE FROM rotation_participants WHERE id = ANY($1) + `), + + updateParticipantUserID: p.P(` + UPDATE rotation_participants SET user_id = $2 WHERE id = $1 + `), + + setActiveIndex: p.P(` + UPDATE rotation_state SET rotation_participant_id = (SELECT id FROM rotation_participants WHERE rotation_id = $1 AND position = $2), + position = $2 + WHERE rotation_id = $1 + `), + findPartCount: p.P(`SELECT participant_count FROM rotations WHERE id = $1`), + }, p.Err +} + +func (db *DB) FindAllRotationsByScheduleID(ctx context.Context, schedID string) ([]Rotation, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + err = validate.UUID("ScheduleID", schedID) + if err != nil { + return nil, err + } + rows, err := db.findAllBySched.QueryContext(ctx, schedID) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + 
defer rows.Close() + + var rotations []Rotation + var rot Rotation + var tz string + for rows.Next() { + err = rows.Scan(&rot.ID, &rot.Name, &rot.Description, &rot.Type, &rot.Start, &rot.ShiftLength, &tz) + if err != nil { + return nil, err + } + loc, err := util.LoadLocation(tz) + if err != nil { + return nil, err + } + rot.Start = rot.Start.In(loc) + rotations = append(rotations, rot) + } + return rotations, nil +} + +func (db *DB) IsParticipantActive(ctx context.Context, partID string) (bool, error) { + err := validate.UUID("RotationParticipantID", partID) + if err != nil { + return false, err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return false, err + } + var n int + err = db.participantActive.QueryRowContext(ctx, partID).Scan(&n) + if err == sql.ErrNoRows { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func (db *DB) State(ctx context.Context, id string) (*State, error) { + return db.StateTx(ctx, nil, id) +} + +func (db *DB) StateTx(ctx context.Context, tx *sql.Tx, id string) (*State, error) { + err := validate.UUID("RotationID", id) + if err != nil { + return nil, err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + stmt := db.state + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + row := stmt.QueryRowContext(ctx, id) + var s State + var part sql.NullString + err = row.Scan(&s.Position, &part, &s.ShiftStart) + if err == sql.ErrNoRows { + return nil, ErrNoState + } + if err != nil { + return nil, errors.Wrap(err, "query rotation state") + } + s.ParticipantID = part.String + s.RotationID = id + + return &s, nil +} + +func (db *DB) FindAllStateByScheduleID(ctx context.Context, scheduleID string) ([]State, error) { + err := validate.UUID("ScheduleID", scheduleID) + if err != nil { + return nil, err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + rows, err := 
db.findAllStateBySched.QueryContext(ctx, scheduleID) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var results []State + var s State + var part sql.NullString + for rows.Next() { + err = rows.Scan(&s.RotationID, &s.Position, &part, &s.ShiftStart) + if err != nil { + return nil, err + } + s.ParticipantID = part.String + results = append(results, s) + } + + return results, nil +} + +func (db *DB) CreateRotation(ctx context.Context, r *Rotation) (*Rotation, error) { + return db.CreateRotationTx(ctx, nil, r) +} + +func (db *DB) CreateRotationTx(ctx context.Context, tx *sql.Tx, r *Rotation) (*Rotation, error) { + n, err := r.Normalize() + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + stmt := db.createRotation + if tx != nil { + stmt = tx.Stmt(stmt) + } + + n.ID = uuid.NewV4().String() + + _, err = stmt.ExecContext(ctx, n.ID, n.Name, n.Description, n.Type, n.Start, n.ShiftLength, n.Start.Location().String()) + if err != nil { + return nil, err + } + return n, nil +} + +func (db *DB) UpdateRotation(ctx context.Context, r *Rotation) error { + return db.UpdateRotationTx(ctx, nil, r) +} + +func (db *DB) UpdateRotationTx(ctx context.Context, tx *sql.Tx, r *Rotation) error { + err := validate.UUID("RotationID", r.ID) + if err != nil { + return err + } + n, err := r.Normalize() + if err != nil { + return err + } + + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return err + } + + s := db.updateRotation + if tx != nil { + s = tx.StmtContext(ctx, s) + } + + _, err = s.ExecContext(ctx, n.ID, n.Name, n.Description, n.Type, n.Start, n.ShiftLength, n.Start.Location().String()) + return err +} +func (db *DB) FindAllRotations(ctx context.Context) ([]Rotation, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + rows, err := 
db.findAllRotations.QueryContext(ctx) + if err != nil { + return nil, err + } + defer rows.Close() + + var r Rotation + var res []Rotation + var tz string + for rows.Next() { + err = rows.Scan(&r.ID, &r.Name, &r.Description, &r.Type, &r.Start, &r.ShiftLength, &tz) + if err != nil { + return nil, err + } + loc, err := util.LoadLocation(tz) + if err != nil { + return nil, err + } + r.Start = r.Start.In(loc) + res = append(res, r) + } + return res, nil +} + +func (db *DB) FindMany(ctx context.Context, ids []string) ([]Rotation, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + err = validate.ManyUUID("RotationID", ids, 200) + if err != nil { + return nil, err + } + + rows, err := db.findMany.QueryContext(ctx, pq.StringArray(ids)) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var r Rotation + var tz string + result := make([]Rotation, 0, len(ids)) + for rows.Next() { + err = rows.Scan(&r.ID, &r.Name, &r.Description, &r.Type, &r.Start, &r.ShiftLength, &tz) + if err != nil { + return nil, err + } + loc, err := util.LoadLocation(tz) + if err != nil { + return nil, err + } + r.Start = r.Start.In(loc) + result = append(result, r) + } + + return result, nil +} + +func (db *DB) FindRotation(ctx context.Context, id string) (*Rotation, error) { + err := validate.UUID("RotationID", id) + if err != nil { + return nil, err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + row := db.findRotation.QueryRowContext(ctx, id) + var r Rotation + var tz string + err = row.Scan(&r.ID, &r.Name, &r.Description, &r.Type, &r.Start, &r.ShiftLength, &tz) + if err != nil { + return nil, err + } + loc, err := util.LoadLocation(tz) + if err != nil { + return nil, err + } + r.Start = r.Start.In(loc) + return &r, nil +} + +func (db *DB) FindParticipantCount(ctx context.Context, id string) (int, error) { + err := 
permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return -1, err + } + + err = validate.UUID("RotationID", id) + if err != nil { + return -1, err + } + + row := db.findPartCount.QueryRowContext(ctx, id) + var count int + err = row.Scan(&count) + if err != nil { + return -1, err + } + + return count, nil +} + +func (db *DB) FindRotationForUpdateTx(ctx context.Context, tx *sql.Tx, rotationID string) (*Rotation, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + err = validate.UUID("RotationID", rotationID) + if err != nil { + return nil, err + } + + s := db.findRotationForUpdate + if tx != nil { + s = tx.StmtContext(ctx, s) + } + + row := s.QueryRowContext(ctx, rotationID) + var r Rotation + var tz string + err = row.Scan(&r.ID, &r.Name, &r.Description, &r.Type, &r.Start, &r.ShiftLength, &tz) + if err != nil { + return nil, err + } + loc, err := util.LoadLocation(tz) + if err != nil { + return nil, err + } + r.Start = r.Start.In(loc) + return &r, nil +} + +func (db *DB) DeleteRotation(ctx context.Context, id string) error { + return db.DeleteRotationTx(ctx, nil, id) +} +func (db *DB) DeleteRotationTx(ctx context.Context, tx *sql.Tx, id string) error { + return db.DeleteManyTx(ctx, nil, []string{id}) +} + +func (db *DB) DeleteManyTx(ctx context.Context, tx *sql.Tx, ids []string) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + err = validate.ManyUUID("RotationID", ids, 50) + if err != nil { + return err + } + s := db.deleteRotation + if tx != nil { + s = tx.StmtContext(ctx, s) + } + _, err = s.ExecContext(ctx, pq.StringArray(ids)) + return err + +} +func (db *DB) FindAllParticipantsByScheduleID(ctx context.Context, scheduleID string) ([]Participant, error) { + err := validate.UUID("ScheduleID", scheduleID) + if err != nil { + return nil, err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return 
nil, err + } + + rows, err := db.findAllParticipantsBySched.QueryContext(ctx, scheduleID) + if err != nil { + return nil, err + } + defer rows.Close() + + var p Participant + var userID sql.NullString + var res []Participant + for rows.Next() { + err = rows.Scan(&p.ID, &p.RotationID, &p.Position, &userID) + if err != nil { + return nil, err + } + if userID.Valid { + p.Target = assignment.UserTarget(userID.String) + } else { + p.Target = nil + } + res = append(res, p) + } + + return res, nil +} +func (db *DB) FindAllParticipantsTx(ctx context.Context, tx *sql.Tx, rotationID string) ([]Participant, error) { + err := validate.UUID("RotationID", rotationID) + if err != nil { + return nil, err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + stmt := db.findAllParticipants + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + rows, err := stmt.QueryContext(ctx, rotationID) + if err != nil { + return nil, err + } + defer rows.Close() + + var p Participant + var userID sql.NullString + var res []Participant + for rows.Next() { + err = rows.Scan(&p.ID, &p.RotationID, &p.Position, &userID) + if err != nil { + return nil, err + } + if userID.Valid { + p.Target = assignment.UserTarget(userID.String) + } else { + p.Target = nil + } + res = append(res, p) + } + + sort.Slice(res, func(i, j int) bool { return res[i].Position < res[j].Position }) + + return res, nil +} + +func (db *DB) FindAllParticipants(ctx context.Context, rotationID string) ([]Participant, error) { + return db.FindAllParticipantsTx(ctx, nil, rotationID) +} + +func (db *DB) AddParticipant(ctx context.Context, p *Participant) (*Participant, error) { + return db.AddParticipantTx(ctx, nil, p) +} + +func (db *DB) AddParticipantTx(ctx context.Context, tx *sql.Tx, p *Participant) (*Participant, error) { + n, err := p.Normalize() + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { 
+ return nil, err + } + + stmt := db.addParticipant + if tx != nil { + stmt = tx.Stmt(stmt) + } + + n.ID = uuid.NewV4().String() + + row := stmt.QueryRowContext(ctx, n.ID, n.RotationID, n.Target.TargetID()) + err = row.Scan(&n.Position) + if err != nil { + return nil, err + } + + return n, nil +} + +func (db *DB) RemoveParticipant(ctx context.Context, id string) (string, error) { + return db.RemoveParticipantTx(ctx, nil, id) +} +func (db *DB) RemoveParticipantTx(ctx context.Context, tx *sql.Tx, id string) (string, error) { + err := validate.UUID("RotationParticipantID", id) + if err != nil { + return "", err + } + err = permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return "", err + } + + s := db.deleteParticipant + if tx != nil { + s = tx.Stmt(s) + } + var rotID string + err = s.QueryRowContext(ctx, id).Scan(&rotID) + if err != nil { + return "", err + } + if err == sql.ErrNoRows { + err = nil + } + if err != nil { + return "", err + } + + return rotID, nil +} +func (db *DB) MoveParticipant(ctx context.Context, id string, newPos int) error { + err := validate.Many( + validate.UUID("RotationParticipantID", id), + validate.Range("NewPosition", newPos, 0, 9000), + ) + if err != nil { + return err + } + err = permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + var rotID string + err = db.moveParticipant.QueryRowContext(ctx, id, newPos).Scan(&rotID) + return err +} + +func (db *DB) SetActiveParticipant(ctx context.Context, rotID string, partID string) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + err = validate.Many( + validate.UUID("RotationID", rotID), + validate.UUID("RotationParticipantID", partID), + ) + if err != nil { + return err + } + + _, err = db.setActiveParticipant.ExecContext(ctx, rotID, partID) + return err +} + +func (db *DB) SetActiveIndexTx(ctx context.Context, tx *sql.Tx, rotID string, 
position int) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + err = validate.Many( + validate.UUID("RotationID", rotID), + ) + if err != nil { + return err + } + + stmt := db.setActiveIndex + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + _, err = stmt.ExecContext(ctx, rotID, position) + if p, ok := err.(*pq.Error); ok && p.Code == "23502" && p.Column == "rotation_participant_id" { + // 23502 is not_null_violation + // https://www.postgresql.org/docs/9.6/errcodes-appendix.html + // We are checking to see if there is no participant for that position before returning a validation error + return validation.NewFieldError("ActiveUserIndex", "invalid index for rotation") + } + return err +} + +func (db *DB) FindParticipant(ctx context.Context, id string) (*Participant, error) { + err := validate.UUID("RotationParticipantID", id) + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + row := db.findParticipant.QueryRowContext(ctx, id) + var p Participant + p.ID = id + var userID sql.NullString + err = row.Scan(&p.RotationID, &p.Position, &userID) + if userID.Valid { + p.Target = assignment.UserTarget(userID.String) + } + + return &p, err +} + +func (db *DB) AddRotationUsersTx(ctx context.Context, tx *sql.Tx, rotationID string, userIDs []string) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + err = validate.ManyUUID("UserIDs", userIDs, 50) + if err != nil { + return err + } + + stmt := db.addParticipants + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + _, err = stmt.ExecContext(ctx, rotationID, pq.StringArray(userIDs)) + + return err +} + +func (db *DB) DeleteRotationParticipantsTx(ctx context.Context, tx *sql.Tx, partIDs []string) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil 
{ + return err + } + + err = validate.ManyUUID("ParticipantIDs", partIDs, 50) + if err != nil { + return err + } + + stmt := db.deleteParticipants + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + _, err = stmt.ExecContext(ctx, pq.StringArray(partIDs)) + return err +} + +func (db *DB) UpdateParticipantUserIDTx(ctx context.Context, tx *sql.Tx, partID, userID string) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + err = validate.Many( + validate.UUID("ParticipantID", partID), + validate.UUID("UserID", userID), + ) + if err != nil { + return err + } + + stmt := db.updateParticipantUserID + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + _, err = stmt.ExecContext(ctx, partID, userID) + return err +} + +func (db *DB) DeleteStateTx(ctx context.Context, tx *sql.Tx, rotationID string) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + err = validate.UUID("RotationID", rotationID) + if err != nil { + return err + } + + stmt := db.rmState + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + _, err = stmt.ExecContext(ctx, rotationID) + return err +} diff --git a/schedule/rotation/type.go b/schedule/rotation/type.go new file mode 100644 index 0000000000..0e0e7d9710 --- /dev/null +++ b/schedule/rotation/type.go @@ -0,0 +1,74 @@ +package rotation + +import ( + "database/sql/driver" + "fmt" + "github.com/target/goalert/validation" + "io" + + "github.com/99designs/gqlgen/graphql" +) + +type Type string + +const ( + TypeWeekly Type = "weekly" + TypeDaily Type = "daily" + TypeHourly Type = "hourly" +) + +// Scan handles reading a Role from the DB format +func (r *Type) Scan(value interface{}) error { + switch t := value.(type) { + case []byte: + *r = Type(t) + case string: + *r = Type(t) + default: + return fmt.Errorf("could not process unknown type for rotation type: %T", t) + } + + return nil +} + +// Value 
converts the rotation Type to the DB representation (a plain string).
func (r Type) Value() (driver.Value, error) {
	switch r {
	case TypeWeekly, TypeDaily, TypeHourly:
		return string(r), nil
	default:
		// Reject anything outside the three known rotation types so bad
		// values never reach the database.
		return nil, fmt.Errorf("unknown rotation type specified '%s'", r)
	}
}

// UnmarshalGQL implements the graphql.Unmarshaler interface,
// mapping the GraphQL enum strings onto the Type constants.
func (t *Type) UnmarshalGQL(v interface{}) error {
	str, err := graphql.UnmarshalString(v)
	if err != nil {
		return err
	}
	switch str {
	case "weekly":
		*t = TypeWeekly
	case "daily":
		*t = TypeDaily
	case "hourly":
		*t = TypeHourly
	default:
		// Surface as a field validation error rather than a generic one.
		return validation.NewFieldError("Type", "unknown rotation type "+str)
	}

	return nil
}

// MarshalGQL implements the graphql.Marshaler interface.
// NOTE(review): an unknown Type writes nothing to w (silent no-op) —
// confirm that is the intended behavior for invalid values.
func (t Type) MarshalGQL(w io.Writer) {
	switch t {
	case TypeWeekly:
		graphql.MarshalString("weekly").MarshalGQL(w)
	case TypeHourly:
		graphql.MarshalString("hourly").MarshalGQL(w)
	case TypeDaily:
		graphql.MarshalString("daily").MarshalGQL(w)
	}
}
diff --git a/schedule/rule/clock.go b/schedule/rule/clock.go
new file mode 100644
index 0000000000..0ee3a91e17
--- /dev/null
+++ b/schedule/rule/clock.go
@@ -0,0 +1,99 @@
package rule

import (
	"database/sql/driver"
	"fmt"
	"io"
	"time"

	"github.com/pkg/errors"
)

// Clock represents wall-clock time. It is a duration since midnight.
type Clock time.Duration

// ParseClock will return a new Clock value given a value in the format of '15:04' or '15:04:05'.
// The resulting value will be truncated to the minute.
+func ParseClock(value string) (Clock, error) { + var h, m int + var s float64 + n, err := fmt.Sscanf(value, "%d:%d:%f", &h, &m, &s) + if n == 2 && err == io.ErrUnexpectedEOF { + err = nil + } + if err != nil { + return 0, err + } + if n < 2 { + return 0, errors.New("invalid time format") + } + if n == 3 && (s < 0 || s >= 60) { + return 0, errors.New("invalid seconds value") + } + + if h < 0 || h > 23 { + return 0, errors.New("invalid hours value") + } + if m < 0 || m > 59 { + return 0, errors.New("invalid minutes value") + } + + return NewClock(h, m), nil +} + +// String returns a string representation of the format '15:04'. +func (c Clock) String() string { + return fmt.Sprintf("%02d:%02d", c.Hour(), c.Minute()) +} + +// NewClock returns a Clock value equal to the provided 24-hour value and minute. +func NewClock(hour, minute int) Clock { + return Clock(time.Duration(hour)*time.Hour + time.Duration(minute)*time.Minute) +} + +// Minute returns the minute of the Clock value. +func (c Clock) Minute() int { + r := time.Duration(c) % time.Hour + return int(r / time.Minute) +} + +// Hour returns the hour of the Clock value. +func (c Clock) Hour() int { + return int(time.Duration(c) / time.Hour) +} + +// Format will format the clock value using the same format string +// used by time.Time. +func (c Clock) Format(layout string) string { + return time.Date(0, 0, 0, c.Hour(), c.Minute(), 0, 0, time.UTC).Format(layout) +} + +// Value implements the driver.Valuer interface. +func (c Clock) Value() (driver.Value, error) { + return c.String(), nil +} + +// Scan implements the sql.Scanner interface. 
func (c *Clock) Scan(value interface{}) error {
	var parsed Clock
	var err error
	// The driver may deliver the column as raw bytes, a string, or a
	// time.Time; accept all three.
	switch t := value.(type) {
	case []byte:
		parsed, err = ParseClock(string(t))
	case string:
		parsed, err = ParseClock(t)
	case time.Time:
		parsed = NewClock(
			t.Hour(),
			t.Minute(),
		)
	default:
		return errors.Errorf("could not scan unknown type %T as Clock", t)
	}
	if err != nil {
		return err
	}

	// Only assign on success so a failed Scan leaves *c untouched.
	*c = parsed
	return nil
}
diff --git a/schedule/rule/rule.go b/schedule/rule/rule.go
new file mode 100644
index 0000000000..247cf11059
--- /dev/null
+++ b/schedule/rule/rule.go
@@ -0,0 +1,224 @@
package rule

import (
	"database/sql"
	"errors"
	"fmt"
	"github.com/target/goalert/assignment"
	"github.com/target/goalert/validation/validate"
	"time"

	"github.com/lib/pq"
)

// Rule is a schedule rule: a weekday filter plus start/end clock times,
// associated with an assignment Target (user or rotation).
type Rule struct {
	ID         string `json:"id"`
	ScheduleID string `json:"schedule_id"`
	WeekdayFilter
	Start     Clock     `json:"start"`
	End       Clock     `json:"end"`
	CreatedAt time.Time `json:"created_at"`
	Target    assignment.Target
}

// NewAlwaysActive returns a rule for the given schedule and target that
// is enabled every day; with the zero Start/End clocks it satisfies
// AlwaysActive.
func NewAlwaysActive(scheduleID string, tgt assignment.Target) *Rule {
	return &Rule{
		WeekdayFilter: everyDay,
		ScheduleID:    scheduleID,
		Target:        tgt,
	}
}

// Normalize validates the rule and truncates Start/End to minute
// precision, returning the cleaned copy (the receiver is not mutated).
func (r Rule) Normalize() (*Rule, error) {
	err := validate.UUID("ScheduleID", r.ScheduleID)
	if err != nil {
		return nil, err
	}
	r.Start = Clock(time.Duration(r.Start).Truncate(time.Minute))
	r.End = Clock(time.Duration(r.End).Truncate(time.Minute))
	return &r, nil
}

// scanner abstracts *sql.Row / *sql.Rows so scanFrom works with either.
type scanner interface {
	Scan(...interface{}) error
}

var errNoKnownTarget = errors.New("rule had no known target set (user or rotation)")

// scanFrom populates the rule from a row shaped as
// (id, schedule_id, bool[7] weekday filter, start, end, user_id, rotation_id),
// where at most one of user_id/rotation_id is non-NULL.
func (r *Rule) scanFrom(s scanner) error {
	filter := make(pq.BoolArray, 7)
	f := []interface{}{
		&r.ID,
		&r.ScheduleID,
		&filter,
		&r.Start,
		&r.End,
	}
	var usr, rot sql.NullString
	f = append(f, &usr, &rot)
	err := s.Scan(f...)
	if err != nil {
		return err
	}
	// Array index i maps to time.Weekday(i) (Sunday == 0).
	for i, v := range filter {
		r.SetDay(time.Weekday(i), v)
	}
	// A user target takes precedence if both columns are somehow set;
	// neither set is an error.
	switch {
	case usr.Valid:
		r.Target = assignment.UserTarget(usr.String)
	case rot.Valid:
		r.Target = assignment.RotationTarget(rot.String)
	default:
		return errNoKnownTarget
	}
	return nil
}

// readFields returns the rule's values as scan/exec arguments — the
// mirror of scanFrom, but with the weekday filter expanded into 7
// individual bool columns. NOTE(review): column order presumably matches
// the store's insert/update statements — verify against the rule store.
func (r *Rule) readFields() []interface{} {
	f := []interface{}{
		&r.ID,
		&r.ScheduleID,
		&r.WeekdayFilter[time.Sunday],
		&r.WeekdayFilter[time.Monday],
		&r.WeekdayFilter[time.Tuesday],
		&r.WeekdayFilter[time.Wednesday],
		&r.WeekdayFilter[time.Thursday],
		&r.WeekdayFilter[time.Friday],
		&r.WeekdayFilter[time.Saturday],
		&r.Start,
		&r.End,
	}
	// Only the column matching the target's type is set (non-NULL).
	var usr, rot sql.NullString
	switch r.Target.TargetType() {
	case assignment.TargetTypeUser:
		usr.Valid = true
		usr.String = r.Target.TargetID()
	case assignment.TargetTypeRotation:
		rot.Valid = true
		rot.String = r.Target.TargetID()
	}
	return append(f, usr, rot)
}

// everyDay reports whether the rule is enabled on all seven weekdays.
func (r Rule) everyDay() bool {
	return r.WeekdayFilter == everyDay
}

// StartTime will return the next time the rule would be active.
// If the rule is currently active, it will return the time it
// became active (in the past).
//
// If the rule is NeverActive or AlwaysActive, zero time is returned.
func (r Rule) StartTime(t time.Time) time.Time {
	// A rule that is never (or always) active has no meaningful shift
	// boundary; both cases return the zero time per the doc comment.
	if r.NeverActive() {
		return time.Time{}
	}
	if r.AlwaysActive() {
		return time.Time{}
	}
	// Rules have minute resolution; drop seconds/nanoseconds first.
	t = t.Truncate(time.Minute)
	// Candidate: the start clock-time on t's calendar day, in t's location.
	start := time.Date(t.Year(), t.Month(), t.Day(), r.Start.Hour(), r.Start.Minute(), 0, 0, t.Location())

	if r.IsActive(t) {
		// Currently active: walk backwards to when this shift began.
		if start.After(t) {
			// Overnight shift that started on the previous calendar day.
			start = start.AddDate(0, 0, -1)
		}
		if r.everyDay() {
			return start
		}
		if r.Start == r.End {
			// Start==End with a weekday filter means a contiguous multi-day
			// shift; rewind to the first enabled day of the current run.
			// NOTE(review): DaysSince/DaysUntil come from WeekdayFilter
			// (defined outside this view) — assumes DaysSince counts back to
			// the nearest day matching the given bool; confirm there.
			start = start.AddDate(0, 0, -r.DaysSince(start.Weekday(), false)+1)
		}
	} else {
		// Currently inactive: walk forwards to the next shift start.
		if start.Before(t) {
			start = start.AddDate(0, 0, 1)
		}
		if r.everyDay() {
			return start
		}

		// Skip ahead to the next weekday enabled by the filter.
		start = start.AddDate(0, 0, r.DaysUntil(start.Weekday(), true))
	}

	return start
}

// EndTime will return the next time the rule would be inactive.
// If the rule is currently inactive, it will return the end
// of the next shift.
func (r Rule) EndTime(t time.Time) time.Time {
	if r.NeverActive() {
		return time.Time{}
	}
	if r.AlwaysActive() {
		return time.Time{}
	}

	// Anchor on the relevant shift's start, then compute its matching end.
	start := r.StartTime(t)
	end := time.Date(start.Year(), start.Month(), start.Day(), r.End.Hour(), r.End.Minute(), 0, 0, t.Location())
	if !end.After(start) {
		// Overnight shift: the end clock-time falls on the next calendar day.
		end = end.AddDate(0, 0, 1)
	}

	if r.everyDay() {
		return end
	}

	if r.Start == r.End {
		// Contiguous multi-day shift: push the end through the last enabled
		// day of the run (see NOTE on DaysUntil semantics in StartTime).
		end = end.AddDate(0, 0, r.DaysUntil(start.Weekday(), false)-1)
	}

	return end
}

// NeverActive returns true if the rule will never be active.
func (r Rule) NeverActive() bool { return r.WeekdayFilter == neverDays }

// AlwaysActive will return true if the rule will always be active.
func (r Rule) AlwaysActive() bool { return r.WeekdayFilter == everyDay && r.Start == r.End }

// IsActive determines if the rule is active in the given moment in time, in the location of t.
func (r Rule) IsActive(t time.Time) bool {
	if r.NeverActive() {
		return false
	}
	if r.AlwaysActive() {
		return true
	}
	// Rules have minute resolution.
	t = t.Truncate(time.Minute)

	c := NewClock(t.Hour(), t.Minute())
	if r.Start >= r.End { // overnight
		// An overnight shift enabled on weekday D covers D from Start to
		// midnight plus the following morning from midnight to End. So the
		// moment is active if today is enabled and we are at/past Start, or
		// yesterday was enabled and we are before End.
		prevDay := (t.Weekday() - 1) % 7
		if prevDay < 0 {
			// Go's % keeps the dividend's sign: Sunday (0) - 1 yields -1.
			prevDay += 7
		}
		return (r.Day(t.Weekday()) && c >= r.Start) || (r.Day(prevDay) && c < r.End)
	}

	// Same-day shift: half-open interval [Start, End).
	return r.Day(t.Weekday()) && c >= r.Start && c < r.End
}

// String returns a human-readable string describing the rule
func (r Rule) String() string {
	if r.AlwaysActive() {
		return "Always"
	}
	if r.NeverActive() {
		return "Never"
	}

	// Drop the minutes when a time falls on the hour (e.g. "8am" vs "8:15am").
	var startStr, endStr string
	if r.Start.Minute() == 0 {
		startStr = r.Start.Format("3pm")
	} else {
		startStr = r.Start.Format("3:04pm")
	}

	if r.End.Minute() == 0 {
		endStr = r.End.Format("3pm")
	} else {
		endStr = r.End.Format("3:04pm")
	}

	return fmt.Sprintf("%s-%s %s", startStr, endStr, r.WeekdayFilter.String())
}
diff --git a/schedule/rule/rule_test.go b/schedule/rule/rule_test.go
new file mode 100644
index 0000000000..2a20ad9505
--- /dev/null
+++ b/schedule/rule/rule_test.go
@@ -0,0 +1,402 @@
package rule

import (
	"testing"
	"time"
)

const timeFmt = "Mon Jan _2 3:04PM 2006"

func TestRule_IsActive(t *testing.T) {

	test := func(r Rule, tm time.Time, expected bool) {
		name := r.String() + "/" + tm.Format(timeFmt)
		if r.Start > r.End {
			name += "(overnight)"
		}
		t.Run(name, func(t *testing.T) {
			result := r.IsActive(tm)
			if result != expected {
				t.Errorf("got '%t'; want '%t'", result, expected)
			}
		})
	}

	r := Rule{
		Start: NewClock(8, 0),
		End:   NewClock(20, 0),
	}
	r.SetDay(time.Monday, true)

	data := []struct {
		Time   time.Time
		Active bool
	}{
		{Time: time.Date(2017, 7, 20, 8, 0, 0, 0, time.UTC), Active: false}, // before
		{Time: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC), Active: false}, // after
		{Time: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Active: true},  //
eq start + {Time: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Active: false}, // eq end + {Time: time.Date(2017, 7, 24, 9, 0, 0, 0, time.UTC), Active: true}, // middle + } + + for _, d := range data { + test(r, d.Time, d.Active) + } + // overnight + r.Start, r.End = r.End, r.Start + data = []struct { + Time time.Time + Active bool + }{ + {Time: time.Date(2017, 7, 20, 8, 0, 0, 0, time.UTC), Active: false}, // before + {Time: time.Date(2017, 7, 26, 8, 0, 0, 0, time.UTC), Active: false}, // after + {Time: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Active: true}, // eq start + {Time: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC), Active: false}, // eq end + {Time: time.Date(2017, 7, 24, 9, 0, 0, 0, time.UTC), Active: false}, // middle (wrong side) + {Time: time.Date(2017, 7, 24, 21, 0, 0, 0, time.UTC), Active: true}, // middle + {Time: time.Date(2017, 7, 25, 7, 0, 0, 0, time.UTC), Active: true}, // middle (next day) + } + for _, d := range data { + test(r, d.Time, d.Active) + } + + // weekday filters + r = Rule{ + Start: NewClock(8, 0), + End: NewClock(20, 0), + } + r.WeekdayFilter = WeekdayFilter{0, 1, 1, 1, 1, 1, 0} // M-F + + data = []struct { + Time time.Time + Active bool + }{ + {Time: time.Date(2017, 7, 23, 8, 0, 0, 0, time.UTC), Active: false}, // sun + {Time: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Active: true}, // mon + {Time: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC), Active: true}, // tues + {Time: time.Date(2017, 7, 26, 8, 0, 0, 0, time.UTC), Active: true}, // wed + {Time: time.Date(2017, 7, 27, 8, 0, 0, 0, time.UTC), Active: true}, // thurs + {Time: time.Date(2017, 7, 28, 8, 0, 0, 0, time.UTC), Active: true}, // fri + {Time: time.Date(2017, 7, 29, 8, 0, 0, 0, time.UTC), Active: false}, // sat + } + for _, d := range data { + test(r, d.Time, d.Active) + } + + // contig. 
filters + r = Rule{ + Start: NewClock(8, 0), + End: NewClock(8, 0), + } + r.WeekdayFilter = WeekdayFilter{0, 1, 1, 1, 1, 1, 0} // M-F + + data = []struct { + Time time.Time + Active bool + }{ + {Time: time.Date(2017, 7, 23, 8, 0, 0, 0, time.UTC), Active: false}, // sun + {Time: time.Date(2017, 7, 24, 7, 0, 0, 0, time.UTC), Active: false}, // mon (morn) + {Time: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Active: true}, // mon + {Time: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC), Active: true}, // tues + {Time: time.Date(2017, 7, 26, 8, 0, 0, 0, time.UTC), Active: true}, // wed + {Time: time.Date(2017, 7, 27, 8, 0, 0, 0, time.UTC), Active: true}, // thurs + {Time: time.Date(2017, 7, 28, 8, 0, 0, 0, time.UTC), Active: true}, // fri + {Time: time.Date(2017, 7, 29, 7, 0, 0, 0, time.UTC), Active: true}, // sat (morn) + {Time: time.Date(2017, 7, 29, 8, 0, 0, 0, time.UTC), Active: false}, // sat + } + for _, d := range data { + test(r, d.Time, d.Active) + } + + // weekday overnight + r = Rule{ + Start: NewClock(20, 0), + End: NewClock(8, 0), + } + r.WeekdayFilter = WeekdayFilter{0, 1, 1, 1, 1, 1, 0} // M-F + + data = []struct { + Time time.Time + Active bool + }{ + {Time: time.Date(2017, 7, 23, 20, 0, 0, 0, time.UTC), Active: false}, // sun-mon + {Time: time.Date(2017, 7, 24, 7, 0, 0, 0, time.UTC), Active: false}, + + {Time: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Active: true}, // mon-tues + {Time: time.Date(2017, 7, 25, 7, 0, 0, 0, time.UTC), Active: true}, + + {Time: time.Date(2017, 7, 25, 20, 0, 0, 0, time.UTC), Active: true}, // tues-wed + {Time: time.Date(2017, 7, 26, 7, 0, 0, 0, time.UTC), Active: true}, + + {Time: time.Date(2017, 7, 26, 20, 0, 0, 0, time.UTC), Active: true}, // wed-thurs + {Time: time.Date(2017, 7, 27, 7, 0, 0, 0, time.UTC), Active: true}, + + {Time: time.Date(2017, 7, 27, 20, 0, 0, 0, time.UTC), Active: true}, // thurs-fri + {Time: time.Date(2017, 7, 28, 7, 0, 0, 0, time.UTC), Active: true}, + + {Time: time.Date(2017, 7, 28, 20, 0, 0, 
0, time.UTC), Active: true}, // fri-sat + {Time: time.Date(2017, 7, 29, 7, 0, 0, 0, time.UTC), Active: true}, + + {Time: time.Date(2017, 7, 29, 20, 0, 0, 0, time.UTC), Active: false}, // sat-sun + {Time: time.Date(2017, 7, 30, 7, 0, 0, 0, time.UTC), Active: false}, + } + + for _, d := range data { + test(r, d.Time, d.Active) + } +} + +func TestRule_StartTime(t *testing.T) { + + test := func(r Rule, start, expected time.Time) { + name := r.String() + "/" + start.Format(timeFmt) + if r.Start > r.End { + name += "(overnight)" + } + t.Run(name, func(t *testing.T) { + result := r.StartTime(start) + if !result.Equal(expected) { + t.Errorf("got '%s'; want '%s'", result.Format(timeFmt), expected.Format(timeFmt)) + } + }) + } + + test(Rule{ + Start: NewClock(8, 0), + End: NewClock(20, 0), + WeekdayFilter: WeekdayFilter{0, 1, 1, 0, 0, 0, 0}, + }, + time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC), + time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC), + ) + + r := Rule{ + Start: NewClock(8, 0), + End: NewClock(20, 0), + } + r.SetDay(time.Monday, true) + // jul 24 2017 is a Monday + + data := []struct{ Start, Expected time.Time }{ + + // should be next monday shift + {Start: time.Date(2017, 7, 20, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + + {Start: time.Date(2017, 7, 24, 7, 59, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 8, 1, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + + {Start: time.Date(2017, 7, 24, 19, 59, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + + // following monday + {Start: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 31, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 20, 1, 0, 0, time.UTC), Expected: time.Date(2017, 7, 31, 8, 0, 0, 0, time.UTC)}, + } + for _, 
d := range data { + test(r, d.Start, d.Expected) + } + + r = Rule{ + Start: NewClock(8, 0), + End: NewClock(20, 0), + } + r.SetDay(time.Friday, true) + r.SetDay(time.Saturday, true) + r.SetDay(time.Monday, true) + + data = []struct{ Start, Expected time.Time }{ + // should be next monday shift + {Start: time.Date(2017, 7, 21, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 22, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 22, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + + {Start: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 28, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 25, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 28, 8, 0, 0, 0, time.UTC)}, + } + for _, d := range data { + test(r, d.Start, d.Expected) + } + + r = Rule{ + Start: NewClock(8, 0), + End: NewClock(8, 0), + } + r.SetDay(time.Monday, true) + r.SetDay(time.Tuesday, true) + + data = []struct{ Start, Expected time.Time }{ + // should be next monday shift + {Start: time.Date(2017, 7, 20, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + + {Start: time.Date(2017, 7, 24, 7, 59, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 8, 1, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + + {Start: time.Date(2017, 7, 24, 19, 59, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + + // following monday + {Start: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 20, 1, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + + {Start: time.Date(2017, 7, 25, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 
25, 20, 1, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 26, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 31, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 26, 20, 1, 0, 0, time.UTC), Expected: time.Date(2017, 7, 31, 8, 0, 0, 0, time.UTC)}, + } + for _, d := range data { + test(r, d.Start, d.Expected) + } + + r = Rule{ + Start: NewClock(8, 0), + End: NewClock(20, 0), + } + r.WeekdayFilter = everyDay + data = []struct{ Start, Expected time.Time }{ + + {Start: time.Date(2017, 7, 20, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 20, 8, 0, 0, 0, time.UTC)}, + + {Start: time.Date(2017, 7, 24, 7, 59, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 8, 1, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + + {Start: time.Date(2017, 7, 24, 19, 59, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC)}, + + // following monday + {Start: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 20, 1, 0, 0, time.UTC), Expected: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC)}, + } + for _, d := range data { + test(r, d.Start, d.Expected) + } + + // overnight + r = Rule{ + Start: NewClock(20, 0), + End: NewClock(8, 0), + } + r.SetDay(time.Monday, true) + // July 24th is a Monday + + data = []struct{ Start, Expected time.Time }{ + {Start: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 25, 7, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC)}, + + {Start: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 31, 20, 0, 0, 0, time.UTC)}, + } + + for _, d := range data { + test(r, 
d.Start, d.Expected) + } + +} + +func TestRule_EndTime(t *testing.T) { + test := func(r Rule, start, expected time.Time) { + name := r.String() + "/" + start.Format(timeFmt) + if r.Start > r.End { + name += "(overnight)" + } + t.Run(name, func(t *testing.T) { + result := r.EndTime(start) + if !result.Equal(expected) { + t.Errorf("got '%s'; want '%s'", result.Format(timeFmt), expected.Format(timeFmt)) + } + }) + } + + r := Rule{ + Start: NewClock(8, 0), + End: NewClock(20, 0), + } + r.SetDay(time.Monday, true) + + data := []struct{ Start, Expected time.Time }{ + {Start: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 20, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 31, 20, 0, 0, 0, time.UTC)}, + } + for _, d := range data { + test(r, d.Start, d.Expected) + } + + r = Rule{ + Start: NewClock(8, 0), + End: NewClock(20, 0), + } + r.SetDay(time.Monday, true) + r.SetDay(time.Tuesday, true) + + data = []struct{ Start, Expected time.Time }{ + {Start: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 20, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 25, 20, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 25, 20, 0, 0, 0, time.UTC)}, + } + for _, d := range data { + test(r, d.Start, d.Expected) + } + + r = Rule{ + Start: NewClock(8, 0), + End: NewClock(8, 0), + } + r.SetDay(time.Monday, true) + r.SetDay(time.Tuesday, true) + + data = []struct{ Start, Expected time.Time }{ + {Start: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 26, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 20, 
8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 26, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 26, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 26, 8, 0, 0, 0, time.UTC)}, + } + for _, d := range data { + test(r, d.Start, d.Expected) + } + + r = Rule{ + Start: NewClock(8, 0), + End: NewClock(20, 0), + WeekdayFilter: everyDay, + } + + data = []struct{ Start, Expected time.Time }{ + {Start: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 20, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 20, 20, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 25, 20, 0, 0, 0, time.UTC)}, + } + + for _, d := range data { + test(r, d.Start, d.Expected) + } + + // overnight + r = Rule{ + Start: NewClock(20, 0), + End: NewClock(8, 0), + } + r.SetDay(time.Monday, true) + + data = []struct{ Start, Expected time.Time }{ + {Start: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 8, 1, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 20, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC)}, + } + + for _, d := range data { + test(r, d.Start, d.Expected) + } + + r.WeekdayFilter = everyDay + + data = []struct{ Start, Expected time.Time }{ + {Start: time.Date(2017, 7, 24, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 25, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 20, 8, 0, 0, 0, time.UTC), Expected: time.Date(2017, 7, 21, 8, 0, 0, 0, time.UTC)}, + {Start: time.Date(2017, 7, 24, 20, 0, 0, 0, time.UTC), Expected: time.Date(2017, 
7, 25, 8, 0, 0, 0, time.UTC)}, + } + + for _, d := range data { + test(r, d.Start, d.Expected) + } + +} diff --git a/schedule/rule/store.go b/schedule/rule/store.go new file mode 100644 index 0000000000..8b305c374f --- /dev/null +++ b/schedule/rule/store.go @@ -0,0 +1,477 @@ +package rule + +import ( + "context" + "database/sql" + "github.com/target/goalert/assignment" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + + "github.com/lib/pq" + uuid "github.com/satori/go.uuid" +) + +type Store interface { + ReadStore + Add(context.Context, *Rule) (*Rule, error) + CreateRuleTx(context.Context, *sql.Tx, *Rule) (*Rule, error) + Update(context.Context, *Rule) error + UpdateTx(context.Context, *sql.Tx, *Rule) error + Delete(context.Context, string) error + DeleteTx(context.Context, *sql.Tx, string) error + DeleteManyTx(context.Context, *sql.Tx, []string) error + DeleteByTarget(ctx context.Context, scheduleID string, target assignment.Target) error + FindByTargetTx(ctx context.Context, tx *sql.Tx, scheduleID string, target assignment.Target) ([]Rule, error) +} +type ReadStore interface { + FindScheduleID(context.Context, string) (string, error) + FindOne(context.Context, string) (*Rule, error) + FindAll(ctx context.Context, scheduleID string) ([]Rule, error) + FindAllTx(ctx context.Context, tx *sql.Tx, scheduleID string) ([]Rule, error) + + // FindAllWithUsers works like FindAll but resolves rotations to the active user. + // This is reflected in the Target attribute. + // Rules pointing to inactive rotations (no participants) are omitted. 
+ FindAllWithUsers(ctx context.Context, scheduleID string) ([]Rule, error) +} +type ScheduleTriggerFunc func(string) +type DB struct { + db *sql.DB + + add *sql.Stmt + update *sql.Stmt + delete *sql.Stmt + findOne *sql.Stmt + findAll *sql.Stmt + findTgt *sql.Stmt + + deleteAssignmentByTarget *sql.Stmt + + findAllUsers *sql.Stmt + + findScheduleID *sql.Stmt +} + +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + return &DB{ + db: db, + + findScheduleID: p.P(` + select schedule_id + from schedule_rules + where id = $1 + `), + add: p.P(` + insert into schedule_rules ( + id, + schedule_id, + sunday, + monday, + tuesday, + wednesday, + thursday, + friday, + saturday, + start_time, + end_time, + tgt_user_id, + tgt_rotation_id + ) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) + `), + update: p.P(` + update schedule_rules + set + schedule_id = $2, + sunday = $3, + monday = $4, + tuesday = $5, + wednesday = $6, + thursday = $7, + friday = $8, + saturday = $9, + start_time = $10, + end_time = $11, + tgt_user_id = $12, + tgt_rotation_id = $13 + where id = $1 + `), + delete: p.P(`delete from schedule_rules where id = any($1)`), + deleteAssignmentByTarget: p.P(` + delete from schedule_rules + where + schedule_id = $1 and + (tgt_user_id = $2 or + tgt_rotation_id = $3) + `), + findOne: p.P(` + select + id, + schedule_id, + ARRAY[ + sunday, + monday, + tuesday, + wednesday, + thursday, + friday, + saturday + ], + start_time, + end_time, + tgt_user_id, + tgt_rotation_id + from schedule_rules + where id = $1 + `), + + findAll: p.P(` + select + id, + schedule_id, + ARRAY[ + sunday, + monday, + tuesday, + wednesday, + thursday, + friday, + saturday + ], + start_time, + end_time, + tgt_user_id, + tgt_rotation_id + from schedule_rules + where schedule_id = $1 + order by created_at + `), + findTgt: p.P(` + select + id, + schedule_id, + ARRAY[ + sunday, + monday, + tuesday, + wednesday, + thursday, + friday, + saturday + 
], + start_time, + end_time, + tgt_user_id, + tgt_rotation_id + from schedule_rules + where schedule_id = $1 AND (tgt_user_id = $2 OR tgt_rotation_id = $3) + `), + findAllUsers: p.P(` + with rotation_users as ( + select + s.rotation_id, + p.user_id + from rotation_state s + join rotation_participants p on s.rotation_participant_id = p.id + ) + select + id, + schedule_id, + ARRAY[ + sunday, + monday, + tuesday, + wednesday, + thursday, + friday, + saturday + ], + start_time, + end_time, + case when tgt_user_id is not null then + tgt_user_id + else + rUser.user_id + end, + null + from schedule_rules r + left join rotation_users rUser on rUser.rotation_id = r.tgt_rotation_id + where schedule_id = $1 + order by created_at + `), + }, p.Err +} + +func (db *DB) FindScheduleID(ctx context.Context, ruleID string) (string, error) { + err := validate.UUID("RuleID", ruleID) + if err != nil { + return "", err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return "", err + } + row := db.findScheduleID.QueryRowContext(ctx, ruleID) + var schedID string + err = row.Scan(&schedID) + if err != nil { + return "", err + } + return schedID, nil +} + +func (db *DB) _Add(ctx context.Context, s *sql.Stmt, r *Rule) (*Rule, error) { + n, err := r.Normalize() + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + n.ID = uuid.NewV4().String() + _, err = s.ExecContext(ctx, n.readFields()...) 
+ if err != nil { + return nil, err + } + + return n, nil +} + +func (db *DB) Add(ctx context.Context, r *Rule) (*Rule, error) { + r, err := db._Add(ctx, db.add, r) + if err != nil { + return nil, err + } + return r, nil +} + +func (db *DB) CreateRuleTx(ctx context.Context, tx *sql.Tx, r *Rule) (*Rule, error) { + return db._Add(ctx, tx.Stmt(db.add), r) +} + +func (db *DB) FindByTargetTx(ctx context.Context, tx *sql.Tx, scheduleID string, target assignment.Target) ([]Rule, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + err = validate.Many( + validate.UUID("ScheduleID", scheduleID), + validate.OneOf("TargetType", target.TargetType(), assignment.TargetTypeUser, assignment.TargetTypeRotation), + validate.UUID("TargetID", target.TargetID()), + ) + if err != nil { + return nil, err + } + + stmt := db.findTgt + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + + var tgtUser, tgtRot sql.NullString + switch target.TargetType() { + case assignment.TargetTypeUser: + tgtUser.Valid = true + tgtUser.String = target.TargetID() + case assignment.TargetTypeRotation: + tgtRot.Valid = true + tgtRot.String = target.TargetID() + } + + rows, err := stmt.QueryContext(ctx, scheduleID, tgtUser, tgtRot) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Rule + var r Rule + for rows.Next() { + err = r.scanFrom(rows) + if err != nil { + return nil, err + } + result = append(result, r) + } + + return result, nil +} + +// DeleteByTarget removes all rules for a schedule pointing to the specified target. 
+func (db *DB) DeleteByTarget(ctx context.Context, scheduleID string, target assignment.Target) error { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return err + } + + err = validate.Many( + validate.UUID("ScheduleID", scheduleID), + validate.OneOf("TargetType", target.TargetType(), assignment.TargetTypeUser, assignment.TargetTypeRotation), + validate.UUID("TargetID", target.TargetID()), + ) + if err != nil { + return err + } + + var tgtUser, tgtRot sql.NullString + + switch target.TargetType() { + case assignment.TargetTypeUser: + tgtUser.Valid = true + tgtUser.String = target.TargetID() + case assignment.TargetTypeRotation: + tgtRot.Valid = true + tgtRot.String = target.TargetID() + } + _, err = db.deleteAssignmentByTarget.ExecContext(ctx, scheduleID, tgtUser, tgtRot) + if err != nil { + return err + } + return nil +} + +func (db *DB) Delete(ctx context.Context, ruleID string) error { + return db.DeleteTx(ctx, nil, ruleID) +} +func (db *DB) DeleteTx(ctx context.Context, tx *sql.Tx, ruleID string) error { + return db.DeleteManyTx(ctx, tx, []string{ruleID}) +} +func (db *DB) DeleteManyTx(ctx context.Context, tx *sql.Tx, ruleIDs []string) error { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return err + } + if len(ruleIDs) == 0 { + return nil + } + err = validate.ManyUUID("RuleIDs", ruleIDs, 50) + if err != nil { + return err + } + s := db.delete + if tx != nil { + s = tx.StmtContext(ctx, s) + } + _, err = s.ExecContext(ctx, pq.StringArray(ruleIDs)) + return err + +} + +func (db *DB) UpdateTx(ctx context.Context, tx *sql.Tx, r *Rule) error { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return err + } + n, err := r.Normalize() + if err != nil { + return err + } + + f := n.readFields() + + stmt := db.update + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + _, err = stmt.ExecContext(ctx, f...) 
+ if err != nil { + return err + } + return nil +} +func (db *DB) Update(ctx context.Context, r *Rule) error { + return db.UpdateTx(ctx, nil, r) +} + +func (db *DB) FindOne(ctx context.Context, ruleID string) (*Rule, error) { + err := validate.UUID("RuleID", ruleID) + if err != nil { + return nil, err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + var r Rule + err = r.scanFrom(db.findOne.QueryRowContext(ctx, ruleID)) + if err != nil { + return nil, err + } + return &r, nil +} + +func (db *DB) FindAllWithUsers(ctx context.Context, scheduleID string) ([]Rule, error) { + err := validate.UUID("ScheduleID", scheduleID) + if err != nil { + return nil, err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + rows, err := db.findAllUsers.QueryContext(ctx, scheduleID) + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Rule + var r Rule + for rows.Next() { + err = r.scanFrom(rows) + if err == errNoKnownTarget { + err = nil + } + if err != nil { + return nil, err + } + if r.Target == nil || r.Target.TargetType() != assignment.TargetTypeUser { + continue + } + result = append(result, r) + } + + return result, nil +} +func (db *DB) FindAll(ctx context.Context, scheduleID string) ([]Rule, error) { + return db.FindAllTx(ctx, nil, scheduleID) +} + +func (db *DB) FindAllTx(ctx context.Context, tx *sql.Tx, scheduleID string) ([]Rule, error) { + err := validate.UUID("ScheduleID", scheduleID) + if err != nil { + return nil, err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + stmt := db.findAll + if tx != nil { + stmt = tx.StmtContext(ctx, stmt) + } + rows, err := stmt.QueryContext(ctx, scheduleID) + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Rule + var r Rule + for rows.Next() { + err = r.scanFrom(rows) + if err != nil { + return nil, err + } + result = append(result, r) + } + 
+ return result, nil +} diff --git a/schedule/rule/weekdayfilter.go b/schedule/rule/weekdayfilter.go new file mode 100644 index 0000000000..66c5cc3036 --- /dev/null +++ b/schedule/rule/weekdayfilter.go @@ -0,0 +1,129 @@ +package rule + +import ( + "bytes" + "database/sql/driver" + "strings" + "time" + + "github.com/lib/pq" +) + +type WeekdayFilter [7]byte + +var ( + neverDays = WeekdayFilter([7]byte{}) + everyDay = WeekdayFilter([7]byte{1, 1, 1, 1, 1, 1, 1}) +) + +// Day will return true if the given weekday is enabled. +func (f WeekdayFilter) Day(d time.Weekday) bool { + return f[int(d)] == 1 +} + +// SetDay will update the filter for the given weekday. +func (f *WeekdayFilter) SetDay(d time.Weekday, enabled bool) { + if enabled { + f[int(d)] = 1 + } else { + f[int(d)] = 0 + } +} + +// DaysUntil will give the number of days until +// a matching day from the given weekday. -1 is returned +// if no days match. +func (f WeekdayFilter) DaysUntil(d time.Weekday, enabled bool) int { + if enabled && f == neverDays { + return -1 + } + if !enabled && f == everyDay { + return -1 + } + var val byte + if enabled { + val = 1 + } + idx := bytes.IndexByte(f[d:], val) + if idx > -1 { + return idx + } + + idx = bytes.IndexByte(f[:], val) + return 7 - int(d) + idx +} + +// DaysSince will give the number of days since +// an enabled day from the given weekday. -1 is returned +// if all days are disabled. +func (f WeekdayFilter) DaysSince(d time.Weekday, enabled bool) int { + if enabled && f == neverDays { + return -1 + } + if !enabled && f == everyDay { + return -1 + } + + var val byte + if enabled { + val = 1 + } + idx := bytes.LastIndexByte(f[:d+1], val) + if idx > -1 { + return int(d) - idx + } + + idx = bytes.LastIndexByte(f[d+1:], val) + return 6 - idx +} + +// String returns a string representation of the WeekdayFilter. 
+func (f WeekdayFilter) String() string { + switch f { + case WeekdayFilter{1, 0, 0, 0, 0, 0, 1}: + return "weekends" + case neverDays: + return "never" + case everyDay: + return "every day" + case WeekdayFilter{0, 1, 1, 1, 1, 1, 0}: + return "M-F" + case WeekdayFilter{0, 1, 1, 1, 1, 1, 1}: + return "M-F and Sat" + case WeekdayFilter{1, 1, 1, 1, 1, 1, 0}: + return "M-F and Sun" + } + var days []string + var chain []time.Weekday + flushChain := func() { + if len(chain) < 3 { + for _, wd := range chain { + days = append(days, wd.String()[:3]) + } + chain = chain[:0] + return + } + + days = append(days, chain[0].String()[:3]+"-"+chain[len(chain)-1].String()[:3]) + chain = chain[:0] + } + for d, act := range f { + if act == 1 { + chain = append(chain, time.Weekday(d)) + continue + } + flushChain() + } + flushChain() + + return strings.Join(days, ",") +} + +// Value converts the WeekdayFilter to a DB array of bool. +func (f WeekdayFilter) Value() (driver.Value, error) { + res := make(pq.BoolArray, 7) + for i, v := range f { + res[i] = v == 1 + } + return res, nil +} diff --git a/schedule/rule/weekdayfilter_test.go b/schedule/rule/weekdayfilter_test.go new file mode 100644 index 0000000000..c2a774489c --- /dev/null +++ b/schedule/rule/weekdayfilter_test.go @@ -0,0 +1,90 @@ +package rule + +import ( + "fmt" + "strings" + "testing" + "time" +) + +func TestWeekdayFilter_String(t *testing.T) { + check := func(f WeekdayFilter, exp string) { + var name strings.Builder + for i := range f { + if f[i] == 1 { + name.WriteString(time.Weekday(i).String()[:1]) + } else { + name.WriteByte('_') + } + } + t.Run(name.String(), func(t *testing.T) { + res := f.String() + if res != exp { + t.Errorf("got '%s'; want '%s'", res, exp) + } + }) + } + + check(everyDay, "every day") + check(neverDays, "never") + check(WeekdayFilter{1, 0, 0, 0, 0, 0, 1}, "weekends") + check(WeekdayFilter{0, 1, 1, 1, 1, 1, 0}, "M-F") + check(WeekdayFilter{1, 1, 0, 0, 0, 1, 0}, "Sun,Mon,Fri") +} + +func 
TestWeekdayFilter_DaysUntil(t *testing.T) { + check := func(f WeekdayFilter, in time.Weekday, e bool, exp int) { + t.Run(fmt.Sprintf("%s/From%s-%t", f, in, e), func(t *testing.T) { + res := f.DaysUntil(in, e) + if res != exp { + t.Errorf("got %d; want %d", res, exp) + } + }) + } + + check(everyDay, time.Monday, true, 0) + check(neverDays, time.Monday, true, -1) + check(WeekdayFilter{1, 0, 0, 0, 0, 0, 0}, time.Monday, true, 6) + check(WeekdayFilter{0, 1, 0, 0, 0, 0, 0}, time.Monday, true, 0) + check(WeekdayFilter{0, 0, 1, 0, 0, 0, 0}, time.Monday, true, 1) + check(WeekdayFilter{1, 0, 1, 0, 0, 1, 0}, time.Monday, true, 1) + + check(everyDay, time.Monday, false, -1) + check(neverDays, time.Monday, false, 0) + check(WeekdayFilter{1, 0, 0, 0, 0, 0, 0}, time.Monday, false, 0) + check(WeekdayFilter{0, 1, 0, 0, 0, 0, 0}, time.Monday, false, 1) + check(WeekdayFilter{0, 0, 1, 0, 0, 0, 0}, time.Monday, false, 0) + check(WeekdayFilter{1, 0, 1, 0, 0, 1, 0}, time.Monday, false, 0) + check(WeekdayFilter{0, 1, 1, 1, 1, 1, 1}, time.Monday, false, 6) +} + +func TestWeekdayFilter_DaysSince(t *testing.T) { + check := func(f WeekdayFilter, in time.Weekday, e bool, exp int) { + t.Run(fmt.Sprintf("%s/From%s-%t", f, in, e), func(t *testing.T) { + res := f.DaysSince(in, e) + if res != exp { + t.Errorf("got %d; want %d", res, exp) + } + }) + } + + check(everyDay, time.Monday, true, 0) + check(neverDays, time.Monday, true, -1) + check(WeekdayFilter{1, 0, 0, 0, 0, 0, 0}, time.Monday, true, 1) + check(WeekdayFilter{0, 1, 0, 0, 0, 0, 0}, time.Monday, true, 0) + check(WeekdayFilter{0, 0, 1, 0, 0, 0, 0}, time.Monday, true, 6) + check(WeekdayFilter{1, 0, 1, 0, 0, 1, 0}, time.Monday, true, 1) + check(WeekdayFilter{0, 0, 1, 0, 0, 1, 0}, time.Monday, true, 3) + check(WeekdayFilter{0, 0, 1, 0, 0, 1, 1}, time.Monday, true, 2) + + check(everyDay, time.Monday, false, -1) + check(neverDays, time.Monday, false, 0) + check(WeekdayFilter{1, 0, 0, 0, 0, 0, 0}, time.Monday, false, 0) + check(WeekdayFilter{0, 
1, 0, 0, 0, 0, 0}, time.Monday, false, 1) + check(WeekdayFilter{0, 0, 1, 0, 0, 0, 0}, time.Monday, false, 0) + check(WeekdayFilter{1, 0, 1, 0, 0, 1, 0}, time.Monday, false, 0) + check(WeekdayFilter{0, 0, 1, 0, 0, 1, 0}, time.Monday, false, 0) + check(WeekdayFilter{0, 0, 1, 0, 0, 1, 1}, time.Monday, false, 0) + check(WeekdayFilter{1, 1, 1, 0, 0, 1, 1}, time.Monday, false, 4) + +} diff --git a/schedule/schedule.go b/schedule/schedule.go new file mode 100644 index 0000000000..17b4a18944 --- /dev/null +++ b/schedule/schedule.go @@ -0,0 +1,30 @@ +package schedule + +import ( + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + "time" +) + +type Schedule struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + TimeZone *time.Location `json:"time_zone"` +} + +func (s Schedule) Normalize() (*Schedule, error) { + err := validate.Many( + validate.IDName("Name", s.Name), + validate.Text("Description", s.Description, 1, 255), + ) + if err != nil { + return nil, err + } + + if s.TimeZone == nil { + return nil, validation.NewFieldError("TimeZone", "must be specified") + } + + return &s, nil +} diff --git a/schedule/schedule_test.go b/schedule/schedule_test.go new file mode 100644 index 0000000000..eac56df0cc --- /dev/null +++ b/schedule/schedule_test.go @@ -0,0 +1,30 @@ +package schedule + +import ( + "testing" + "time" +) + +func TestSchedule_Normalize(t *testing.T) { + test := func(valid bool, name string, s Schedule) { + t.Run(name, func(t *testing.T) { + _, err := s.Normalize() + if valid && err != nil { + t.Errorf("err = %v; want nil", err) + } else if !valid && err == nil { + t.Errorf("err = nil; want != nil") + } + }) + } + + data := []struct { + v bool + n string + s Schedule + }{ + {false, "missing name", Schedule{Description: "hello", TimeZone: time.Local}}, + } + for _, d := range data { + test(d.v, d.n, d.s) + } +} diff --git a/schedule/search.go b/schedule/search.go new 
file mode 100644 index 0000000000..2315a20ba6 --- /dev/null +++ b/schedule/search.go @@ -0,0 +1,128 @@ +package schedule + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + "text/template" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// SearchOptions allow filtering and paginating the list of schedules. +type SearchOptions struct { + Search string `json:"s,omitempty"` + After SearchCursor `json:"a,omitempty"` + + // Omit specifies a list of schedule IDs to exclude from the results. + Omit []string `json:"o,omitempty"` + + Limit int `json:"-"` +} + +// SearchCursor is used to indicate a position in a paginated list. +type SearchCursor struct { + Name string `json:"n,omitempty"` +} + +var searchTemplate = template.Must(template.New("search").Parse(` + SELECT + id, name, description, time_zone + FROM schedules sched + WHERE true + {{if .Omit}} + AND not id = any(:omit) + {{end}} + {{if .SearchStr}} + AND (sched.name ILIKE :search OR sched.description ILIKE :search) + {{end}} + {{if .After.Name}} + AND lower(sched.name) > lower(:afterName) + {{end}} + ORDER BY lower(sched.name) + LIMIT {{.Limit}} +`)) + +type renderData SearchOptions + +func (opts renderData) SearchStr() string { + if opts.Search == "" { + return "" + } + + return "%" + search.Escape(opts.Search) + "%" +} + +func (opts renderData) Normalize() (*renderData, error) { + if opts.Limit == 0 { + opts.Limit = search.DefaultMaxResults + } + + err := validate.Many( + validate.Text("Search", opts.Search, 0, search.MaxQueryLen), + validate.Range("Limit", opts.Limit, 0, search.MaxResults), + validate.ManyUUID("Omit", opts.Omit, 50), + ) + if opts.After.Name != "" { + err = validate.Many(err, validate.IDName("After.Name", opts.After.Name)) + } + + return &opts, err +} + +func (opts renderData) QueryArgs() []sql.NamedArg { + return 
[]sql.NamedArg{ + sql.Named("search", opts.SearchStr()), + sql.Named("afterName", opts.After.Name), + sql.Named("omit", pq.StringArray(opts.Omit)), + } +} + +func (db *DB) Search(ctx context.Context, opts *SearchOptions) ([]Schedule, error) { + err := permission.LimitCheckAny(ctx, permission.User) + if err != nil { + return nil, err + } + if opts == nil { + opts = &SearchOptions{} + } + data, err := (*renderData)(opts).Normalize() + if err != nil { + return nil, err + } + query, args, err := search.RenderQuery(ctx, searchTemplate, data) + if err != nil { + return nil, errors.Wrap(err, "render query") + } + + rows, err := db.db.QueryContext(ctx, query, args...) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Schedule + var s Schedule + var tz string + for rows.Next() { + err = rows.Scan(&s.ID, &s.Name, &s.Description, &tz) + if err != nil { + return nil, err + } + loc, err := util.LoadLocation(tz) + if err != nil { + return nil, err + } + s.TimeZone = loc + result = append(result, s) + } + + return result, nil +} diff --git a/schedule/shiftcalc/overrides.go b/schedule/shiftcalc/overrides.go new file mode 100644 index 0000000000..0db34e5d1d --- /dev/null +++ b/schedule/shiftcalc/overrides.go @@ -0,0 +1,90 @@ +package shiftcalc + +import ( + "github.com/target/goalert/override" + "sort" + "time" +) + +func (d *data) ScheduleFinalShiftsWithOverrides(start, end time.Time) []Shift { + return finalShiftsWithOverrides(d.ScheduleFinalShifts(start, end), d.userOverrides) +} + +func applyOverride(shift Shift, o override.UserOverride) (result []Shift) { + if shift.UserID != o.RemoveUserID { + return []Shift{shift} + } + if !o.Start.Before(shift.End) { + return []Shift{shift} + } + if !o.End.After(shift.Start) { + return []Shift{shift} + } + + if shift.Start.Before(o.Start) { + // break off first part of shift + result = append(result, Shift{UserID: shift.UserID, Start: shift.Start, End: 
o.Start}) + + // advance the start of the remaining shift being processed + shift.Start = o.Start + } + + // at this point we know that shift start is during override + + // we're "replacing", so the AddUserID is on-call during the override + end := o.End // end is the end of the override or shift, whichever is sooner + if end.After(shift.End) { + end = shift.End + } + if o.AddUserID != "" { + result = append(result, Shift{UserID: o.AddUserID, Start: shift.Start, End: end}) + } + + if end.Before(shift.End) { + // Original user completes their shift + result = append(result, Shift{UserID: shift.UserID, Start: end, End: shift.End}) + } + + return result +} + +func finalShiftsWithOverrides(final []Shift, userOverrides []override.UserOverride) []Shift { + withOverrides := make([]Shift, 0, len(final)) + + addOverrides := make([]override.UserOverride, 0, len(userOverrides)) + otherOverrides := make([]override.UserOverride, 0, len(userOverrides)) + for _, o := range userOverrides { + if o.RemoveUserID == "" { + addOverrides = append(addOverrides, o) + continue + } + + otherOverrides = append(otherOverrides, o) + } + + // Sort overrides by start time so that as we progress the .Start of the shift forward + // they don't get skipped over. + sort.Slice(otherOverrides, func(i, j int) bool { return otherOverrides[i].Start.Before(otherOverrides[j].Start) }) + + a := make([]Shift, 0, len(final)*2) + b := make([]Shift, 0, len(final)*2) + a = append(a, final...) + for _, o := range otherOverrides { + for _, shift := range a { + b = append(b, applyOverride(shift, o)...) + } + a, b = b, a + b = b[:0] + } + withOverrides = append(withOverrides, a...) 
+ + for _, o := range addOverrides { + withOverrides = append(withOverrides, Shift{ + Start: o.Start, + End: o.End, + UserID: o.AddUserID, + }) + } + + return mergeShiftsByTarget(withOverrides) +} diff --git a/schedule/shiftcalc/overrides_test.go b/schedule/shiftcalc/overrides_test.go new file mode 100644 index 0000000000..550ca12f21 --- /dev/null +++ b/schedule/shiftcalc/overrides_test.go @@ -0,0 +1,126 @@ +package shiftcalc + +import ( + "github.com/target/goalert/override" + "testing" + "time" +) + +func TestFinalShiftsWithOverrides(t *testing.T) { + check := func(shifts []Shift, overrides []override.UserOverride, expected []Shift) { + t.Run("", func(t *testing.T) { + res := finalShiftsWithOverrides(shifts, overrides) + if len(res) != len(expected) { + t.Fatalf("got len() = %d; want %d", len(res), len(expected)) + } + for i, exp := range expected { + if !res[i].Start.Equal(exp.Start) { + t.Errorf("Shift[%d]: Start=%s; want %s", i, res[i].Start, exp.Start) + } + if !res[i].End.Equal(exp.End) { + t.Errorf("Shift[%d]: End=%s; want %s", i, res[i].End, exp.End) + } + if res[i].UserID != exp.UserID { + t.Errorf("Shift[%d]: UserID=%s; want %s", i, res[i].UserID, exp.UserID) + } + } + }) + } + + check( + []Shift{ + {UserID: "a", Start: time.Date(0, 0, 0, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 0, 0, 21, 0, 0, time.UTC)}, + {UserID: "a", Start: time.Date(0, 0, 1, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 1, 0, 21, 0, 0, time.UTC)}, + }, + nil, + []Shift{ + {UserID: "a", Start: time.Date(0, 0, 0, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 0, 0, 21, 0, 0, time.UTC)}, + {UserID: "a", Start: time.Date(0, 0, 1, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 1, 0, 21, 0, 0, time.UTC)}, + }, + ) + + check( + []Shift{ + {UserID: "a", Start: time.Date(0, 0, 0, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 0, 0, 21, 0, 0, time.UTC)}, + {UserID: "a", Start: time.Date(0, 0, 1, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 1, 0, 21, 0, 0, time.UTC)}, + }, + 
[]override.UserOverride{{RemoveUserID: "a", Start: time.Date(0, 0, 0, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 0, 0, 21, 0, 0, time.UTC)}}, + []Shift{ + {UserID: "a", Start: time.Date(0, 0, 1, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 1, 0, 21, 0, 0, time.UTC)}, + }, + ) + + check( + []Shift{ + {UserID: "a", Start: time.Date(0, 0, 0, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 0, 0, 21, 0, 0, time.UTC)}, + {UserID: "a", Start: time.Date(0, 0, 1, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 1, 0, 21, 0, 0, time.UTC)}, + }, + []override.UserOverride{{AddUserID: "b", Start: time.Date(0, 0, 0, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 0, 0, 21, 0, 0, time.UTC)}}, + []Shift{ + {UserID: "a", Start: time.Date(0, 0, 0, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 0, 0, 21, 0, 0, time.UTC)}, + {UserID: "b", Start: time.Date(0, 0, 0, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 0, 0, 21, 0, 0, time.UTC)}, + {UserID: "a", Start: time.Date(0, 0, 1, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 1, 0, 21, 0, 0, time.UTC)}, + }, + ) + + check( + []Shift{ + {UserID: "a", Start: time.Date(0, 0, 0, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 0, 0, 21, 0, 0, time.UTC)}, + {UserID: "a", Start: time.Date(0, 0, 1, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 1, 0, 21, 0, 0, time.UTC)}, + }, + []override.UserOverride{{RemoveUserID: "a", AddUserID: "b", Start: time.Date(0, 0, 0, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 0, 0, 9, 5, 0, time.UTC)}}, + []Shift{ + {UserID: "b", Start: time.Date(0, 0, 0, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 0, 0, 9, 5, 0, time.UTC)}, + {UserID: "a", Start: time.Date(0, 0, 0, 0, 9, 5, 0, time.UTC), End: time.Date(0, 0, 0, 0, 21, 0, 0, time.UTC)}, + {UserID: "a", Start: time.Date(0, 0, 1, 0, 9, 0, 0, time.UTC), End: time.Date(0, 0, 1, 0, 21, 0, 0, time.UTC)}, + }, + ) + + check( + []Shift{ + {UserID: "Joey", Start: time.Date(2018, 3, 16, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 17, 1, 30, 0, 0, time.UTC)}, + {UserID: "Srilekha", Start: 
time.Date(2018, 3, 17, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 17, 13, 30, 0, 0, time.UTC)}, + {UserID: "Joey", Start: time.Date(2018, 3, 17, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 18, 1, 30, 0, 0, time.UTC)}, + {UserID: "Srilekha", Start: time.Date(2018, 3, 18, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 18, 13, 30, 0, 0, time.UTC)}, + {UserID: "Joey", Start: time.Date(2018, 3, 18, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 19, 1, 30, 0, 0, time.UTC)}, + {UserID: "Srilekha", Start: time.Date(2018, 3, 19, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 19, 13, 30, 0, 0, time.UTC)}, + {UserID: "Joey", Start: time.Date(2018, 3, 19, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 20, 1, 30, 0, 0, time.UTC)}, + {UserID: "Srilekha", Start: time.Date(2018, 3, 20, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 20, 13, 30, 0, 0, time.UTC)}, + {UserID: "Joey", Start: time.Date(2018, 3, 20, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 21, 1, 30, 0, 0, time.UTC)}, + {UserID: "Srilekha", Start: time.Date(2018, 3, 21, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 21, 13, 30, 0, 0, time.UTC)}, + {UserID: "Joey", Start: time.Date(2018, 3, 21, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 22, 1, 30, 0, 0, time.UTC)}, + {UserID: "Srilekha", Start: time.Date(2018, 3, 22, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 22, 13, 30, 0, 0, time.UTC)}, + }, + []override.UserOverride{ + {AddUserID: "Tom", RemoveUserID: "Joey", Start: time.Date(2018, 3, 17, 22, 0, 0, 0, time.UTC), End: time.Date(2018, 3, 18, 1, 30, 0, 0, time.UTC)}, + {AddUserID: "Dyanne", RemoveUserID: "Joey", Start: time.Date(2018, 3, 20, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 21, 1, 30, 0, 0, time.UTC)}, + {AddUserID: "Tom", RemoveUserID: "Joey", Start: time.Date(2018, 3, 19, 19, 0, 0, 0, time.UTC), End: time.Date(2018, 3, 20, 1, 30, 0, 0, time.UTC)}, + }, + []Shift{ + {UserID: "Joey", Start: time.Date(2018, 3, 16, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 17, 1, 
30, 0, 0, time.UTC)}, + {UserID: "Srilekha", Start: time.Date(2018, 3, 17, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 17, 13, 30, 0, 0, time.UTC)}, + + // Tom takes over the end of Joey's shift + {UserID: "Joey", Start: time.Date(2018, 3, 17, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 17, 22, 00, 0, 0, time.UTC)}, + {UserID: "Tom", Start: time.Date(2018, 3, 17, 22, 00, 0, 0, time.UTC), End: time.Date(2018, 3, 18, 1, 30, 0, 0, time.UTC)}, + + {UserID: "Srilekha", Start: time.Date(2018, 3, 18, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 18, 13, 30, 0, 0, time.UTC)}, + {UserID: "Joey", Start: time.Date(2018, 3, 18, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 19, 1, 30, 0, 0, time.UTC)}, + {UserID: "Srilekha", Start: time.Date(2018, 3, 19, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 19, 13, 30, 0, 0, time.UTC)}, + + // Tom takes over the end of Joey's shift + {UserID: "Joey", Start: time.Date(2018, 3, 19, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 19, 19, 0, 0, 0, time.UTC)}, + {UserID: "Tom", Start: time.Date(2018, 3, 19, 19, 0, 0, 0, time.UTC), End: time.Date(2018, 3, 20, 1, 30, 0, 0, time.UTC)}, + + {UserID: "Srilekha", Start: time.Date(2018, 3, 20, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 20, 13, 30, 0, 0, time.UTC)}, + + // Dyanne takes over Joey's entire shift + {UserID: "Dyanne", Start: time.Date(2018, 3, 20, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 21, 1, 30, 0, 0, time.UTC)}, + + {UserID: "Srilekha", Start: time.Date(2018, 3, 21, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 21, 13, 30, 0, 0, time.UTC)}, + {UserID: "Joey", Start: time.Date(2018, 3, 21, 13, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 22, 1, 30, 0, 0, time.UTC)}, + {UserID: "Srilekha", Start: time.Date(2018, 3, 22, 1, 30, 0, 0, time.UTC), End: time.Date(2018, 3, 22, 13, 30, 0, 0, time.UTC)}, + }, + ) + +} diff --git a/schedule/shiftcalc/shiftcalc.go b/schedule/shiftcalc/shiftcalc.go new file mode 100644 index 0000000000..d2ae190615 --- /dev/null 
+++ b/schedule/shiftcalc/shiftcalc.go @@ -0,0 +1,502 @@ +package shiftcalc + +import ( + "context" + "fmt" + "github.com/target/goalert/assignment" + "github.com/target/goalert/override" + "github.com/target/goalert/schedule" + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "github.com/target/goalert/util/log" + "sort" + "time" + + "github.com/pkg/errors" +) + +type Calculator interface { + RotationShifts(ctx context.Context, start, end time.Time, rotationID string) ([]RotationShift, error) + ScheduleAssignments(ctx context.Context, start, end time.Time, scheduleID string) ([]ScheduleAssignment, error) + ScheduleFinalShifts(ctx context.Context, start, end time.Time, scheduleID string) ([]Shift, error) + ScheduleFinalShiftsWithOverrides(ctx context.Context, start, end time.Time, scheduleID string) ([]Shift, error) +} +type Shift struct { + Start time.Time `json:"start_time"` + End time.Time `json:"end_time"` + UserID string `json:"user_id"` +} + +const debugTimeFmt = "MonJan2_2006@3:04pm" + +func (s Shift) String() string { + return fmt.Sprintf("Shift{Start: %s, End: %s, UserID: %s}", + s.Start.Local().Format(debugTimeFmt), + s.End.Local().Format(debugTimeFmt), + s.UserID, + ) +} + +type ShiftCalculator struct { + RuleStore rule.Store + SchedStore schedule.Store + RotStore rotation.Store + OStore override.Store +} + +type ScheduleAssignment struct { + Target assignment.Target `json:"target"` + ScheduleID string `json:"schedule_id"` + Rules []rule.Rule `json:"rules"` + Shifts []Shift `json:"shifts"` +} + +type RotationShift struct { + Start time.Time `json:"start_time"` + End time.Time `json:"end_time"` + PartID string `json:"participant_id"` +} + +type data struct { + sched schedule.Schedule + rules []rule.Rule + rots []rotation.Rotation + parts []rotation.Participant + rState []rotation.State + userOverrides []override.UserOverride +} + +func (d *data) rulesByTarget() 
map[assignment.RawTarget][]rule.Rule { + m := make(map[assignment.RawTarget][]rule.Rule, len(d.rules)) + for _, r := range d.rules { + raw := assignment.NewRawTarget(r.Target) + m[raw] = append(m[raw], r) + } + return m +} + +func (d *data) ScheduleAssignments(start, end time.Time) []ScheduleAssignment { + tgtMap := d.rulesByTarget() + + result := make([]ScheduleAssignment, 0, len(tgtMap)) + for tgt, rules := range tgtMap { + result = append(result, ScheduleAssignment{ + Rules: rules, + Target: tgt, + Shifts: d.ShiftsForRules(start, end, rules), + ScheduleID: d.sched.ID, + }) + } + + sort.Slice(result, func(i, j int) bool { + iType := result[i].Target.TargetType() + jType := result[j].Target.TargetType() + if iType != jType && iType == assignment.TargetTypeUser { + return true + } + return result[i].Target.TargetID() < result[j].Target.TargetID() + }) + + return result +} + +func (d *data) ScheduleFinalShifts(start, end time.Time) []Shift { + tgtMap := d.rulesByTarget() + + var resultShifts []Shift + for _, rules := range tgtMap { + shifts := d.ShiftsForRules(start, end, rules) + resultShifts = append(resultShifts, shifts...) + } + + return mergeShiftsByTarget(resultShifts) +} + +func (d *data) ShiftsForRules(start, end time.Time, rules []rule.Rule) []Shift { + var shifts []Shift + for _, r := range rules { + shifts = append(shifts, d.ShiftsForRule(start, end, r)...) 
+ } + + return mergeShiftsByTarget(shifts) +} + +func (d *data) rotation(id string) *rotation.Rotation { + for _, r := range d.rots { + if r.ID == id { + return &r + } + } + + return nil +} + +func (d *data) rotationParticipantUserIDs(id string) []string { + var parts []rotation.Participant + for _, p := range d.parts { + if p.RotationID != id { + continue + } + parts = append(parts, p) + } + sort.Slice(parts, func(i, j int) bool { return parts[i].Position < parts[j].Position }) + userIDs := make([]string, len(parts)) + for i, p := range parts { + userIDs[i] = p.Target.TargetID() + } + + return userIDs +} + +func (d *data) rotationState(id string) *rotation.State { + for _, r := range d.rState { + if r.RotationID == id { + return &r + } + } + + return nil +} + +func (d *data) ShiftsForRule(start, end time.Time, rule rule.Rule) []Shift { + start = start.In(d.sched.TimeZone) + end = end.In(d.sched.TimeZone) + + rShifts := ruleShifts(start, end, rule) + if rule.Target.TargetType() == assignment.TargetTypeUser { + for i := range rShifts { + rShifts[i].UserID = rule.Target.TargetID() + } + return rShifts + } + + if len(rShifts) == 0 { + return nil + } + + rotID := rule.Target.TargetID() + state := d.rotationState(rotID) + if state == nil || !end.After(state.ShiftStart) { + return nil + } + + orig := rShifts + rShifts = rShifts[:0] + for _, s := range orig { + if s.End.Before(state.ShiftStart) { + continue + } + rShifts = append(rShifts, s) + } + if len(rShifts) == 0 { + return nil + } + + userIDs := d.rotationParticipantUserIDs(rotID) + if len(userIDs) == 0 { + return nil + } + rot := d.rotation(rotID) + if rot == nil { + return nil + } + + partCount := len(userIDs) + curUserID := userIDs[state.Position%partCount] + rotEnd := rot.EndTime(state.ShiftStart) + nextPart := func() { + state.Position = (state.Position + 1) % partCount + state.ShiftStart = rotEnd + curUserID = userIDs[state.Position] + rotEnd = rot.EndTime(state.ShiftStart) + } + + for 
!rotEnd.After(rShifts[0].Start) { + nextPart() + } + + expanded := make([]Shift, 0, len(rShifts)) + for _, shift := range rShifts { + if shift.End.Before(state.ShiftStart) { + continue + } + for !rotEnd.After(shift.Start) { + nextPart() + } + start := shift.Start + if start.Before(state.ShiftStart) { + start = state.ShiftStart + } + for rotEnd.Before(shift.End) { + expanded = append(expanded, Shift{Start: start, End: rotEnd, UserID: curUserID}) + start = rotEnd + nextPart() + } + expanded = append(expanded, Shift{Start: start, End: shift.End, UserID: curUserID}) + } + return expanded +} + +func (c *ShiftCalculator) fetchData(ctx context.Context, schedID string) (*data, error) { + sched, err := c.SchedStore.FindOne(ctx, schedID) + if err != nil { + return nil, errors.Wrap(err, "fetch schedule details") + } + rules, err := c.RuleStore.FindAll(ctx, schedID) + if err != nil { + return nil, errors.Wrap(err, "fetch schedule rules") + } + rots, err := c.RotStore.FindAllRotationsByScheduleID(ctx, schedID) + if err != nil { + return nil, errors.Wrap(err, "fetch schedule rotations") + } + parts, err := c.RotStore.FindAllParticipantsByScheduleID(ctx, schedID) + if err != nil { + return nil, errors.Wrap(err, "fetch schedule rotation participants") + } + rState, err := c.RotStore.FindAllStateByScheduleID(ctx, schedID) + if err != nil { + return nil, errors.Wrap(err, "fetch schedule rotation state") + } + return &data{ + sched: *sched, + rules: rules, + rots: rots, + parts: parts, + rState: rState, + }, nil +} + +func (c *ShiftCalculator) ScheduleAssignments(ctx context.Context, start, end time.Time, schedID string) ([]ScheduleAssignment, error) { + data, err := c.fetchData(ctx, schedID) + if err != nil { + return nil, err + } + + return data.ScheduleAssignments(start, end), nil +} + +func (c *ShiftCalculator) ScheduleFinalShifts(ctx context.Context, start, end time.Time, schedID string) ([]Shift, error) { + data, err := c.fetchData(ctx, schedID) + if err != nil { + return nil, 
err + } + + return data.ScheduleFinalShifts(start, end), nil +} + +// ScheduleFinalShiftsWithOverrides will calculate the final set of on-call shifts for the schedule during the given time frame. +func (c *ShiftCalculator) ScheduleFinalShiftsWithOverrides(ctx context.Context, start, end time.Time, schedID string) ([]Shift, error) { + data, err := c.fetchData(ctx, schedID) + if err != nil { + return nil, err + } + data.userOverrides, err = c.OStore.FindAllUserOverrides(ctx, start, end, assignment.ScheduleTarget(schedID)) + if err != nil { + return nil, err + } + + return data.ScheduleFinalShiftsWithOverrides(start, end), nil +} + +// _rotationShifts get's all rotation shifts with hard alignment to start and end. +func _rotationShifts(start, end time.Time, rot *rotation.Rotation, actShiftPos int, actShiftStart time.Time, partIDs []string) []RotationShift { + if actShiftStart.After(end) { + return nil + } + var shifts []RotationShift + + cPos, cStart, cEnd := actShiftPos, actShiftStart, rot.EndTime(actShiftStart) + for { + if cStart.Before(start) { + cStart = start + } + if cEnd.After(end) { + cEnd = end + } + + if cEnd.After(start) { + shifts = append(shifts, RotationShift{ + Start: cStart, + End: cEnd, + PartID: partIDs[cPos], + }) + } + if cEnd.Equal(end) { + return shifts + } + + cStart, cEnd, cPos = cEnd, rot.EndTime(cEnd), (cPos+1)%len(partIDs) + } +} + +func (r *ShiftCalculator) RotationShifts(ctx context.Context, start, end time.Time, rotationID string) ([]RotationShift, error) { + if end.Before(start) { + return nil, nil + } + ctx = log.WithField(ctx, "RotationID", rotationID) + state, err := r.RotStore.State(ctx, rotationID) + if err == rotation.ErrNoState { + return nil, nil + } + if err != nil { + return nil, errors.Wrap(err, "lookup rotation state") + } + if state == nil { + return nil, nil + } + + rot, err := r.RotStore.FindRotation(ctx, rotationID) + if err != nil { + return nil, errors.Wrap(err, "lookup rotation config") + } + + parts, err := 
r.RotStore.FindAllParticipants(ctx, rotationID) + if err != nil { + return nil, errors.Wrap(err, "lookup rotation participants") + } + + if len(parts) == 0 { + return nil, nil + } + + sort.Slice(parts, func(i, j int) bool { return parts[i].Position < parts[j].Position }) + partIDs := make([]string, len(parts)) + for i, p := range parts { + partIDs[i] = p.ID + } + + shifts := _rotationShifts(start, end, rot, state.Position, state.ShiftStart, partIDs) + shifts = mergeRotationShiftsByID(shifts) + sort.Slice(shifts, func(i, j int) bool { return shifts[i].Start.Before(shifts[j].Start) }) + + return shifts, err +} + +func mergeRotationShiftsByID(shifts []RotationShift) []RotationShift { + sort.Slice(shifts, func(i, j int) bool { return shifts[i].Start.Before(shifts[j].Start) }) + + m := make(map[string][]RotationShift) + for _, s := range shifts { + m[s.PartID] = append(m[s.PartID], s) + } + + shifts = shifts[:0] + for _, sh := range m { + shifts = append(shifts, mergeRotationShifts(sh)...) + } + + return shifts +} + +func mergeRotationShifts(shifts []RotationShift) []RotationShift { + if len(shifts) < 2 { + return shifts + } + + merged := make([]RotationShift, 0, len(shifts)) + cur := shifts[0] + for _, s := range shifts[1:] { + if s.Start.Before(cur.End) || s.Start.Equal(cur.End) { + if s.End.After(cur.End) { + cur.End = s.End + } + continue + } + + merged = append(merged, cur) + cur = s + } + merged = append(merged, cur) + + return merged +} + +func sortShifts(shifts []Shift) { + sort.Slice(shifts, func(i, j int) bool { + if !shifts[i].Start.Equal(shifts[j].Start) { + return shifts[i].Start.Before(shifts[j].Start) + } + if !shifts[i].End.Equal(shifts[j].End) { + return shifts[i].End.Before(shifts[j].End) + } + + return shifts[i].UserID < shifts[j].UserID + }) +} + +func mergeShiftsByTarget(shifts []Shift) []Shift { + sortShifts(shifts) + m := make(map[string][]Shift) + for _, s := range shifts { + m[s.UserID] = append(m[s.UserID], s) + } + + shifts = shifts[:0] + for 
_, tgtShifts := range m { + shifts = append(shifts, mergeShifts(tgtShifts)...) + } + sortShifts(shifts) + return shifts +} + +// mergeShifts will merge shifts based on start and end times +// s should already be sorted, and it is assumed that all Assignments are identical +func mergeShifts(shifts []Shift) []Shift { + if len(shifts) < 2 { + return shifts + } + + merged := make([]Shift, 0, len(shifts)) + cur := shifts[0] + for _, s := range shifts[1:] { + if s.Start.Before(cur.End) || s.Start.Equal(cur.End) { + if s.End.After(cur.End) { + cur.End = s.End + } + continue + } + + merged = append(merged, cur) + cur = s + } + merged = append(merged, cur) + + return merged +} + +func ruleShifts(start, end time.Time, rule rule.Rule) []Shift { + if end.Before(start) { + return nil + } + if rule.AlwaysActive() { + return []Shift{ + {Start: start, End: end}, + } + } + if rule.NeverActive() { + return nil + } + + var shifts []Shift + + shiftStart := rule.StartTime(start) + shiftEnd := rule.EndTime(shiftStart) + + var c int + // arbitrary limit on the number of returned shifts + for { + c++ + if c > 10000 { + panic("too many shifts") + } + shifts = append(shifts, Shift{Start: shiftStart, End: shiftEnd}) + shiftStart = rule.StartTime(shiftEnd) + if shiftStart.After(end) { + break + } + shiftEnd = rule.EndTime(shiftStart) + } + + return shifts +} diff --git a/schedule/shiftcalc/shiftcalc_test.go b/schedule/shiftcalc/shiftcalc_test.go new file mode 100644 index 0000000000..cce308cd45 --- /dev/null +++ b/schedule/shiftcalc/shiftcalc_test.go @@ -0,0 +1,120 @@ +package shiftcalc + +import ( + "github.com/target/goalert/schedule/rotation" + "github.com/target/goalert/schedule/rule" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func parseDate(t *testing.T, value string) time.Time { + t.Helper() + return parseTimeFmt(t, "Jan _2 3:04PM 2006", value) +} + +func parseTimeFmt(t *testing.T, layout, value string) time.Time { + t.Helper() + tm, err := 
time.ParseInLocation(layout, value, time.Local) + if err != nil { + t.Fatal(err) + } + return tm.In(time.Local) +} + +func TestRotationShifts(t *testing.T) { + rotStart := time.Date(2017, time.January, 0, 1, 0, 0, 0, time.UTC) + qStart := time.Date(2017, time.January, 0, 0, 0, 0, 0, time.UTC) + qEnd := time.Date(2017, time.February, 0, 0, 0, 0, 0, time.UTC) + + rot := &rotation.Rotation{ + Type: rotation.TypeDaily, + ShiftLength: 1, + Start: rotStart, + } + parts := []string{"first", "second", "third"} + shifts := _rotationShifts(qStart, qEnd, rot, 1, rotStart, parts) + + if len(shifts) != 31 { + t.Errorf("got %d shifts; want 31", len(shifts)) + } + if shifts[0].PartID != "second" { + t.Errorf("got '%s' participant for first shift; want 'second' participant", shifts[0].PartID) + } + if !shifts[0].Start.Equal(rotStart) { + t.Errorf("got '%s' for first shift start; want rotation start (%s)", shifts[0].Start.String(), rotStart.String()) + } +} + +func TestRuleShifts(t *testing.T) { + start := parseDate(t, "Jul 20 11:00AM 2017") + end := parseDate(t, "Jul 24 11:00AM 2017") + + var r rule.Rule + r.Start = rule.NewClock(8, 0) + r.End = rule.NewClock(20, 0) + r.SetDay(time.Friday, true) + r.SetDay(time.Saturday, true) + r.SetDay(time.Monday, true) + + shifts := ruleShifts(start, end, r) + + assert.Contains(t, shifts, Shift{ + Start: parseDate(t, "Jul 21 8:00AM 2017"), + End: parseDate(t, "Jul 21 8:00PM 2017"), + }) + assert.Contains(t, shifts, Shift{ + Start: parseDate(t, "Jul 22 8:00AM 2017"), + End: parseDate(t, "Jul 22 8:00PM 2017"), + }) + assert.Contains(t, shifts, Shift{ + Start: parseDate(t, "Jul 24 8:00AM 2017"), + End: parseDate(t, "Jul 24 8:00PM 2017"), + }) + + assert.Len(t, shifts, 3) +} + +// for historical shift data +func TestHistoricalShifts(t *testing.T) { + start := parseDate(t, "Jul 20 11:00AM 2018") + end := parseDate(t, "Aug 24 11:00AM 2018") + + var r rule.Rule + r.Start = rule.NewClock(8, 0) + r.End = rule.NewClock(20, 0) + r.SetDay(time.Sunday, 
true) + r.SetDay(time.Monday, true) + r.SetDay(time.Tuesday, true) + + shifts := ruleShifts(start, end, r) + + assert.Contains(t, shifts, Shift{ + Start: parseDate(t, "Jul 23 8:00AM 2018"), + End: parseDate(t, "Jul 23 8:00PM 2018"), + }) + assert.Contains(t, shifts, Shift{ + Start: parseDate(t, "Aug 20 8:00AM 2018"), + End: parseDate(t, "Aug 20 8:00PM 2018"), + }) + assert.NotContains(t, shifts, Shift{ + Start: parseDate(t, "Aug 22 11:00AM 2018"), + End: parseDate(t, "Aug 22 11:00PM 2018"), + }) + assert.NotContains(t, shifts, Shift{ + Start: parseDate(t, "Jul 19 11:00AM 2018"), + End: parseDate(t, "Jul 19 11:00PM 2018"), + }) + assert.NotContains(t, shifts, Shift{ + Start: parseDate(t, "Jul 19 11:00AM 2018"), + End: parseDate(t, "Jul 20 11:00PM 2018"), + }) + assert.NotContains(t, shifts, Shift{ + Start: parseDate(t, "Aug 24 11:00AM 2018"), + End: parseDate(t, "Aug 29 11:00PM 2018"), + }) + + assert.Len(t, shifts, 15) + +} diff --git a/schedule/store.go b/schedule/store.go new file mode 100644 index 0000000000..3d1eb74718 --- /dev/null +++ b/schedule/store.go @@ -0,0 +1,269 @@ +package schedule + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + + "github.com/lib/pq" + + "github.com/pkg/errors" +) + +type Store interface { + ReadStore + Create(context.Context, *Schedule) (*Schedule, error) + CreateScheduleTx(context.Context, *sql.Tx, *Schedule) (*Schedule, error) + Update(context.Context, *Schedule) error + UpdateTx(context.Context, *sql.Tx, *Schedule) error + Delete(context.Context, string) error + DeleteTx(context.Context, *sql.Tx, string) error + DeleteManyTx(context.Context, *sql.Tx, []string) error + FindMany(context.Context, []string) ([]Schedule, error) + FindOneForUpdate(ctx context.Context, tx *sql.Tx, id string) (*Schedule, error) + + Search(context.Context, *SearchOptions) ([]Schedule, error) +} +type ReadStore interface { 
+ FindAll(context.Context) ([]Schedule, error) + FindOne(context.Context, string) (*Schedule, error) +} + +type DB struct { + db *sql.DB + + create *sql.Stmt + update *sql.Stmt + findAll *sql.Stmt + findOne *sql.Stmt + delete *sql.Stmt + + findOneUp *sql.Stmt + + findMany *sql.Stmt +} + +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + + return &DB{ + db: db, + create: p.P(`INSERT INTO schedules (id, name, description, time_zone) VALUES (DEFAULT, $1, $2, $3) RETURNING id`), + update: p.P(`UPDATE schedules SET name = $2, description = $3, time_zone = $4 WHERE id = $1`), + findAll: p.P(`SELECT id, name, description, time_zone FROM schedules`), + findOne: p.P(`SELECT id, name, description, time_zone FROM schedules WHERE id = $1`), + + findOneUp: p.P(`SELECT id, name, description, time_zone FROM schedules WHERE id = $1 FOR UPDATE`), + + findMany: p.P(`SELECT id, name, description, time_zone FROM schedules WHERE id = any($1)`), + + delete: p.P(`DELETE FROM schedules WHERE id = any($1)`), + }, p.Err +} +func (db *DB) FindMany(ctx context.Context, ids []string) ([]Schedule, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + err = validate.ManyUUID("ScheduleID", ids, 200) + if err != nil { + return nil, err + } + + rows, err := db.findMany.QueryContext(ctx, pq.StringArray(ids)) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + result := make([]Schedule, 0, len(ids)) + var s Schedule + var tz string + for rows.Next() { + err = rows.Scan(&s.ID, &s.Name, &s.Description, &tz) + if err != nil { + return nil, err + } + + s.TimeZone, err = util.LoadLocation(tz) + if err != nil { + return nil, err + } + result = append(result, s) + } + + return result, nil +} +func (db *DB) Create(ctx context.Context, s *Schedule) (*Schedule, error) { + return db.CreateScheduleTx(ctx, nil, s) +} + +func (db *DB) 
CreateScheduleTx(ctx context.Context, tx *sql.Tx, s *Schedule) (*Schedule, error) { + n, err := s.Normalize() + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return nil, err + } + stmt := db.create + if tx != nil { + stmt = tx.Stmt(stmt) + } + row := stmt.QueryRowContext(ctx, n.Name, n.Description, n.TimeZone.String()) + err = row.Scan(&n.ID) + return n, err +} + +func (db *DB) Update(ctx context.Context, s *Schedule) error { + n, err := s.Normalize() + if err != nil { + return err + } + + err = validate.UUID("ScheduleID", s.ID) + if err != nil { + return err + } + + err = permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + _, err = db.update.ExecContext(ctx, n.ID, n.Name, n.Description, n.TimeZone.String()) + return err +} +func (db *DB) UpdateTx(ctx context.Context, tx *sql.Tx, s *Schedule) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + n, err := s.Normalize() + if err != nil { + return err + } + + err = validate.UUID("ScheduleID", n.ID) + if err != nil { + return err + } + + _, err = tx.StmtContext(ctx, db.update).ExecContext(ctx, n.ID, n.Name, n.Description, n.TimeZone.String()) + return err +} + +func (db *DB) FindAll(ctx context.Context) ([]Schedule, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + rows, err := db.findAll.QueryContext(ctx) + if err != nil { + return nil, err + } + defer rows.Close() + + var s Schedule + var tz string + var res []Schedule + for rows.Next() { + err = rows.Scan(&s.ID, &s.Name, &s.Description, &tz) + if err != nil { + return nil, err + } + s.TimeZone, err = util.LoadLocation(tz) + if err != nil { + return nil, errors.Wrap(err, "parse scanned time zone") + } + res = append(res, s) + } + + return res, nil +} + +func (db *DB) FindOneForUpdate(ctx context.Context, tx 
*sql.Tx, id string) (*Schedule, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + err = validate.UUID("ScheduleID", id) + if err != nil { + return nil, err + } + + row := tx.StmtContext(ctx, db.findOneUp).QueryRowContext(ctx, id) + var s Schedule + var tz string + err = row.Scan(&s.ID, &s.Name, &s.Description, &tz) + if err != nil { + return nil, err + } + + s.TimeZone, err = util.LoadLocation(tz) + if err != nil { + return nil, err + } + + return &s, nil +} + +func (db *DB) FindOne(ctx context.Context, id string) (*Schedule, error) { + err := validate.UUID("ScheduleID", id) + if err != nil { + return nil, err + } + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + row := db.findOne.QueryRowContext(ctx, id) + var s Schedule + var tz string + err = row.Scan(&s.ID, &s.Name, &s.Description, &tz) + if err != nil { + return nil, err + } + + s.TimeZone, err = util.LoadLocation(tz) + if err != nil { + return nil, err + } + + return &s, nil +} + +func (db *DB) Delete(ctx context.Context, id string) error { + return db.DeleteTx(ctx, nil, id) +} +func (db *DB) DeleteTx(ctx context.Context, tx *sql.Tx, id string) error { + return db.DeleteManyTx(ctx, tx, []string{id}) +} +func (db *DB) DeleteManyTx(ctx context.Context, tx *sql.Tx, ids []string) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + if len(ids) == 0 { + return nil + } + err = validate.ManyUUID("ScheduleID", ids, 50) + if err != nil { + return err + } + s := db.delete + if tx != nil { + s = tx.StmtContext(ctx, s) + } + _, err = s.ExecContext(ctx, pq.StringArray(ids)) + return err +} diff --git a/search/config.go b/search/config.go new file mode 100644 index 0000000000..41e54fe174 --- /dev/null +++ b/search/config.go @@ -0,0 +1,8 @@ +package search + +// Shared configuration for search methods. 
+const ( + MaxQueryLen = 255 + MaxResults = 150 + DefaultMaxResults = 15 +) diff --git a/search/cursor.go b/search/cursor.go new file mode 100644 index 0000000000..82e91f95fd --- /dev/null +++ b/search/cursor.go @@ -0,0 +1,29 @@ +package search + +import ( + "encoding/base64" + "encoding/json" + "github.com/target/goalert/validation" +) + +// ParseCursor will parse the data held in cursor c into the passed state object. +func ParseCursor(c string, state interface{}) error { + data, err := base64.URLEncoding.DecodeString(c) + if err != nil { + return validation.NewFieldError("Cursor", err.Error()) + } + err = json.Unmarshal(data, state) + if err != nil { + return validation.NewFieldError("Cursor", err.Error()) + } + return nil +} + +// Cursor will return a cursor for the given state data. +func Cursor(state interface{}) (string, error) { + data, err := json.Marshal(state) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(data), nil +} diff --git a/search/escape.go b/search/escape.go new file mode 100644 index 0000000000..893d246cdd --- /dev/null +++ b/search/escape.go @@ -0,0 +1,12 @@ +package search + +import "strings" + +// We need to escape any characters that have meaning for `ILIKE` in Postgres. +// https://www.postgresql.org/docs/8.3/static/functions-matching.html +var escapeRep = strings.NewReplacer(`\`, `\\`, `%`, `\%`, `_`, `\_`) + +// Escape will escape a search string for use with the Postgres `like` and `ilike` operators. +func Escape(s string) string { + return escapeRep.Replace(s) +} diff --git a/search/render.go b/search/render.go new file mode 100644 index 0000000000..9973d16567 --- /dev/null +++ b/search/render.go @@ -0,0 +1,45 @@ +package search + +import ( + "bytes" + "context" + "database/sql" + "sort" + "strconv" + "strings" + "text/template" +) + +// RenderData is used as the data for a template with the ability to output a list +// of all possible arguments. 
+type RenderData interface { + QueryArgs() []sql.NamedArg +} + +// RenderQuery will render a search query with the given template and data. +// Named args in the format `:name:` will be replaced with the appropriate numbered +// args (e.g. `$1`, `$2`) +func RenderQuery(ctx context.Context, tmpl *template.Template, data RenderData) (query string, args []interface{}, err error) { + var buf bytes.Buffer + err = tmpl.Execute(&buf, data) + if err != nil { + return "", nil, err + } + + nArgs := data.QueryArgs() + sort.Slice(nArgs, func(i, j int) bool { return len(nArgs[i].Name) > len(nArgs[j].Name) }) + + args = make([]interface{}, 0, len(nArgs)) + query = buf.String() + n := 1 + for _, arg := range nArgs { + rep := ":" + arg.Name + if !strings.Contains(query, rep) { + continue + } + query = strings.Replace(query, rep, "$"+strconv.Itoa(n), -1) + args = append(args, arg.Value) + n++ + } + return query, args, nil +} diff --git a/service/legacysearch.go b/service/legacysearch.go new file mode 100644 index 0000000000..04213b4b6d --- /dev/null +++ b/service/legacysearch.go @@ -0,0 +1,173 @@ +package service + +import ( + "bytes" + "context" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/validation/validate" + "strings" + "text/template" + + "github.com/pkg/errors" +) + +// LegacySearchOptions contains criteria for filtering and sorting services. +type LegacySearchOptions struct { + // Search is matched case-insensitive against the service name and description. + Search string + + // FavoritesUserID specifies the UserID whose favorite services want to be displayed. + FavoritesUserID string + + // FavoritesOnly controls filtering the results to those marked as favorites by FavoritesUserID. + FavoritesOnly bool + + // FavoritesFirst indicates that services marked as favorite (by FavoritesUserID) should be returned first (before any non-favorites). 
	FavoritesFirst bool

	// Limit, if not zero, will limit the number of results.
	Limit int
}

// legacySearchTemplate renders the (pre-cursor) service search query.
// Positional args: $1 = favorites user ID, $2 = label key OR ILIKE pattern,
// $3 = label value (only when a non-wildcard label value is present).
// NOTE(review): the {{.Limit}} here resolves against the render context
// built in LegacySearch, not directly against LegacySearchOptions.
var legacySearchTemplate = template.Must(template.New("search").Parse(`
	SELECT DISTINCT ON ({{ .OrderBy }})
		svc.id,
		svc.name,
		svc.description,
		ep.name,
		fav notnull
	FROM services svc
	JOIN escalation_policies ep ON ep.id = svc.escalation_policy_id
	{{if not .FavoritesOnly }}LEFT {{end}}JOIN user_favorites fav ON svc.id = fav.tgt_service_id AND fav.user_id = $1
	{{if and .LabelKey .LabelNegate}}
		WHERE svc.id NOT IN (
			SELECT tgt_service_id
			FROM labels
			WHERE
				tgt_service_id NOTNULL AND
				key = $2
				{{if ne .LabelValue "*"}} AND value = $3{{end}}
		)
	{{else if .LabelKey}}
		JOIN labels l ON
			l.tgt_service_id = svc.id AND
			l.key = $2
			{{if ne .LabelValue "*"}} AND value = $3{{end}}
	{{else}}
		WHERE $2 = '' OR svc.name ILIKE $2 OR svc.description ILIKE $2
	{{end}}
	ORDER BY {{ .OrderBy }}
	{{if ne .Limit 0}}LIMIT {{.Limit}}{{end}}
`))

// LegacySearch will return a list of matching services.
+func (db *DB) LegacySearch(ctx context.Context, opts *LegacySearchOptions) ([]Service, error) { + if opts == nil { + opts = &LegacySearchOptions{} + } + + userCheck := permission.User + if opts.FavoritesUserID != "" { + userCheck = permission.MatchUser(opts.FavoritesUserID) + } + + err := permission.LimitCheckAny(ctx, permission.System, userCheck) + if err != nil { + return nil, err + } + + err = validate.Text("Search", opts.Search, 0, 250) + if opts.FavoritesOnly || opts.FavoritesFirst || opts.FavoritesUserID != "" { + err = validate.Many(err, validate.UUID("FavoritesUserID", opts.FavoritesUserID)) + } + if err != nil { + return nil, err + } + + var renderContext struct { + LegacySearchOptions + + OrderBy string + + LabelKey string + LabelValue string + LabelNegate bool + + Limit int + } + renderContext.LegacySearchOptions = *opts + + var parts []string + if opts.FavoritesFirst { + parts = append(parts, "fav") + } + parts = append(parts, + "lower(svc.name)", // use lower because we already have a unique index that does this + "svc.name", + ) + renderContext.OrderBy = strings.Join(parts, ",") + + queryArgs := []interface{}{ + opts.FavoritesUserID, + } + + // case sensitive searching for labels + if idx := strings.Index(opts.Search, "="); idx > -1 { + renderContext.LabelKey = opts.Search[:idx] + if strings.HasSuffix(renderContext.LabelKey, "!") { + renderContext.LabelNegate = true + renderContext.LabelKey = strings.TrimSuffix(renderContext.LabelKey, "!") + } + renderContext.LabelValue = opts.Search[idx+1:] + if renderContext.LabelValue == "" { + renderContext.LabelValue = "*" + } + // skip validating LabelValue if search wildcard character or < 3 characters + if renderContext.LabelValue == "*" || len(renderContext.LabelValue) < 3 { + err = validate.LabelKey("LabelKey", renderContext.LabelKey) + } else { + err = validate.Many( + validate.LabelKey("LabelKey", renderContext.LabelKey), + validate.LabelValue("LabelValue", renderContext.LabelValue), + ) + } + if err != 
nil { + return nil, err + } + + queryArgs = append(queryArgs, renderContext.LabelKey) + if renderContext.LabelValue != "*" { + queryArgs = append(queryArgs, renderContext.LabelValue) + } + + } else { + opts.Search = "%" + search.Escape(opts.Search) + "%" + queryArgs = append(queryArgs, opts.Search) + } + + buf := new(bytes.Buffer) + err = legacySearchTemplate.Execute(buf, renderContext) + if err != nil { + return nil, errors.Wrap(err, "render query") + } + + rows, err := db.db.QueryContext(ctx, buf.String(), queryArgs...) + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Service + for rows.Next() { + var s Service + err = rows.Scan(&s.ID, &s.Name, &s.Description, &s.epName, &s.isUserFavorite) + if err != nil { + return nil, err + } + + result = append(result, s) + } + + return result, nil +} diff --git a/service/search.go b/service/search.go new file mode 100644 index 0000000000..8006264f0a --- /dev/null +++ b/service/search.go @@ -0,0 +1,224 @@ +package service + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/validation/validate" + "strings" + "text/template" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// SearchOptions contains criteria for filtering and sorting services. +type SearchOptions struct { + // Search is matched case-insensitive against the service name and description. + Search string `json:"s,omitempty"` + + // FavoritesUserID specifies the UserID whose favorite services want to be displayed. + FavoritesUserID string `json:"u,omitempty"` + + // FavoritesOnly controls filtering the results to those marked as favorites by FavoritesUserID. + FavoritesOnly bool `json:"o,omitempty"` + + // Omit specifies a list of service IDs to exclude from the results. 
	Omit []string `json:"m,omitempty"`

	// FavoritesFirst indicates that services marked as favorite (by FavoritesUserID) should be returned first (before any non-favorites).
	FavoritesFirst bool `json:"f,omitempty"`

	// Limit will limit the number of results.
	Limit int `json:"-"`

	After SearchCursor `json:"a,omitempty"`
}

// SearchCursor is the pagination state: results resume strictly after the
// named service (and, when FavoritesFirst is set, after its favorite group).
type SearchCursor struct {
	Name       string `json:"n"`
	IsFavorite bool   `json:"f"`
}

// searchTemplate is the cursor-based service search query. Named args
// (:favUserID, :labelKey, ...) are replaced by search.RenderQuery.
// NOTE(review): `not id = any(:omit)` relies on unqualified `id` resolving
// to svc.id — verify user_favorites has no `id` column.
var searchTemplate = template.Must(template.New("search").Parse(`
	SELECT{{if .LabelKey}} DISTINCT ON ({{ .OrderBy }}){{end}}
		svc.id,
		svc.name,
		svc.description,
		svc.escalation_policy_id,
		fav notnull
	FROM services svc
	{{if not .FavoritesOnly }}LEFT {{end}}JOIN user_favorites fav ON svc.id = fav.tgt_service_id AND {{if .FavoritesUserID}}fav.user_id = :favUserID{{else}}false{{end}}
	{{if and .LabelKey (not .LabelNegate)}}
		JOIN labels l ON
			l.tgt_service_id = svc.id AND
			l.key = :labelKey
			{{if ne .LabelValue "*"}} AND value = :labelValue{{end}}
	{{end}}
	WHERE true
	{{if .Omit}}
		AND not id = any(:omit)
	{{end}}
	{{- if and .LabelKey .LabelNegate}}
		AND svc.id NOT IN (
			SELECT tgt_service_id
			FROM labels
			WHERE
				tgt_service_id NOTNULL AND
				key = :labelKey
				{{if ne .LabelValue "*"}} AND value = :labelValue{{end}}
		)
	{{end}}
	{{- if and .Search (not .LabelKey)}}
		AND (svc.name ILIKE :search OR svc.description ILIKE :search)
	{{- end}}
	{{- if .After.Name}}
		AND
		{{if not .FavoritesFirst}}
			lower(svc.name) > lower(:afterName)
		{{else if .After.IsFavorite}}
			((fav notnull AND lower(svc.name) > lower(:afterName)) OR fav isnull)
		{{else}}
			(fav isnull AND lower(svc.name) > lower(:afterName))
		{{end}}
	{{- end}}
	ORDER BY {{ .OrderBy }}
	LIMIT {{.Limit}}
`))

// renderData adapts SearchOptions for searchTemplate rendering.
type renderData SearchOptions

// OrderBy returns the ORDER BY column list; favorites sort ahead when requested.
func (opts renderData) OrderBy() string {
	if opts.FavoritesFirst {
		return "fav, lower(svc.name)"
	}

	return "lower(svc.name)"
}

// LabelKey returns the label key portion of a `key=value` search, or "".
func (opts renderData) LabelKey() string {
	idx := strings.IndexByte(opts.Search, '=')
	if idx == -1 {
		return ""
	}
	return strings.TrimSuffix(opts.Search[:idx], "!") // if `!=`` is used
}

// LabelValue returns the label value portion of a `key=value` search;
// an empty value becomes the wildcard "*".
func (opts renderData) LabelValue() string {
	idx := strings.IndexByte(opts.Search, '=')
	if idx == -1 {
		return ""
	}
	val := opts.Search[idx+1:]
	if val == "" {
		return "*"
	}
	return val
}

// LabelNegate reports whether the search used the `key!=value` form.
func (opts renderData) LabelNegate() bool {
	idx := strings.IndexByte(opts.Search, '=')
	if idx < 1 {
		return false
	}

	return opts.Search[idx-1] == '!'
}

// SearchStr returns the ILIKE pattern for plain-text search ("" if no search).
func (opts renderData) SearchStr() string {
	if opts.Search == "" {
		return ""
	}

	return "%" + search.Escape(opts.Search) + "%"
}

// Normalize validates the options, applies the default limit, and returns
// a copy safe to render; it does not mutate the receiver's caller.
func (opts renderData) Normalize() (*renderData, error) {
	if opts.Limit == 0 {
		opts.Limit = search.DefaultMaxResults
	}

	err := validate.Many(
		validate.Text("Search", opts.Search, 0, search.MaxQueryLen),
		validate.Range("Limit", opts.Limit, 0, search.MaxResults),
		validate.ManyUUID("Omit", opts.Omit, 50),
	)
	if opts.After.Name != "" {
		err = validate.Many(err, validate.IDName("After.Name", opts.After.Name))
	}
	if opts.FavoritesOnly || opts.FavoritesFirst || opts.FavoritesUserID != "" {
		err = validate.Many(err, validate.UUID("FavoritesUserID", opts.FavoritesUserID))
	}
	if err != nil {
		return nil, err
	}
	if opts.LabelKey() != "" {
		err = validate.LabelKey("LabelKey", opts.LabelKey())
		if opts.LabelValue() != "*" {
			err = validate.Many(err,
				validate.LabelValue("LabelValue", opts.LabelValue()),
			)
		}
	}
	if err != nil {
		return nil, err
	}

	return &opts, nil
}

// QueryArgs supplies every named arg the template may reference; unused
// ones are dropped by search.RenderQuery.
func (opts renderData) QueryArgs() []sql.NamedArg {
	return []sql.NamedArg{
		sql.Named("favUserID", opts.FavoritesUserID),
		sql.Named("labelKey", opts.LabelKey()),
		sql.Named("labelValue", opts.LabelValue()),
		sql.Named("labelNegate", opts.LabelNegate()),
		sql.Named("search", opts.SearchStr()),
		sql.Named("afterName", opts.After.Name),
		sql.Named("omit", pq.StringArray(opts.Omit)),
	}
}

// Search will return a list of matching services.
list of matching services and the total number of matches available. +func (db *DB) Search(ctx context.Context, opts *SearchOptions) ([]Service, error) { + if opts == nil { + opts = &SearchOptions{} + } + + userCheck := permission.User + if opts.FavoritesUserID != "" { + userCheck = permission.MatchUser(opts.FavoritesUserID) + } + + err := permission.LimitCheckAny(ctx, permission.System, userCheck) + if err != nil { + return nil, err + } + + data, err := (*renderData)(opts).Normalize() + if err != nil { + return nil, err + } + + query, args, err := search.RenderQuery(ctx, searchTemplate, data) + if err != nil { + return nil, errors.Wrap(err, "render query") + } + + rows, err := db.db.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + var result []Service + for rows.Next() { + var s Service + err = rows.Scan(&s.ID, &s.Name, &s.Description, &s.EscalationPolicyID, &s.isUserFavorite) + if err != nil { + return nil, err + } + + result = append(result, s) + } + + return result, nil +} diff --git a/service/service.go b/service/service.go new file mode 100644 index 0000000000..ca010375d9 --- /dev/null +++ b/service/service.go @@ -0,0 +1,37 @@ +package service + +import "github.com/target/goalert/validation/validate" + +type Service struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + EscalationPolicyID string `json:"escalation_policy_id"` + + epName string + isUserFavorite bool +} + +func (s Service) EscalationPolicyName() string { + return s.epName +} + +// IsUserFavorite returns a boolean value based on if the service is a favorite of the user or not. +func (s Service) IsUserFavorite() bool { + return s.isUserFavorite +} + +// Normalize will validate and 'normalize' the ContactMethod -- such as making email lower-case +// and setting carrier to "" (for non-phone types). 
func (s Service) Normalize() (*Service, error) {
	err := validate.Many(
		validate.IDName("Name", s.Name),
		validate.Text("Description", s.Description, 1, 255),
		validate.UUID("EscalationPolicyID", s.EscalationPolicyID),
	)
	if err != nil {
		return nil, err
	}

	return &s, nil
}

package service

import (
	"testing"
)

// TestService_Normalize table-drives Normalize over valid and invalid services.
func TestService_Normalize(t *testing.T) {
	test := func(valid bool, s Service) {
		name := "valid"
		if !valid {
			name = "invalid"
		}
		t.Run(name, func(t *testing.T) {
			t.Logf("%+v", s)
			_, err := s.Normalize()
			if valid && err != nil {
				t.Errorf("got %v; want nil", err)
			} else if !valid && err == nil {
				t.Errorf("got nil err; want non-nil")
			}
		})
	}

	valid := []Service{
		{Name: "Sample Service", Description: "Sample Service", EscalationPolicyID: "A035FD3C-73C8-4F72-BECD-36B027AE1374"},
	}
	invalid := []Service{
		{},
	}
	for _, s := range valid {
		test(true, s)
	}
	for _, s := range invalid {
		test(false, s)
	}
}

package service

import (
	"context"
	"database/sql"
	"github.com/target/goalert/permission"
	"github.com/target/goalert/util"
	"github.com/target/goalert/validation/validate"

	"github.com/lib/pq"
	uuid "github.com/satori/go.uuid"
)

// Store is the service persistence contract.
type Store interface {
	FindMany(context.Context, []string) ([]Service, error)

	FindOne(context.Context, string) (*Service, error)
	FindOneForUpdate(ctx context.Context, tx *sql.Tx, id string) (*Service, error)
	FindOneForUser(ctx context.Context, userID, serviceID string) (*Service, error)
	FindAll(context.Context) ([]Service, error)
	DeleteManyTx(context.Context, *sql.Tx, []string) error
	Insert(context.Context, *Service) (*Service, error)
	CreateServiceTx(context.Context, *sql.Tx, *Service) (*Service, error)

	Update(context.Context, *Service) error
	UpdateTx(context.Context, *sql.Tx, *Service) error

	Delete(ctx context.Context, id string) error
	DeleteTx(ctx context.Context, tx *sql.Tx, id string) error

	FindAllByEP(context.Context, string) ([]Service, error)
	LegacySearch(ctx context.Context, opts *LegacySearchOptions) ([]Service, error)
	Search(ctx context.Context, opts *SearchOptions) ([]Service, error)
}

// DB implements Store using prepared statements against Postgres.
type DB struct {
	db *sql.DB

	findOne     *sql.Stmt
	findOneUp   *sql.Stmt
	findMany    *sql.Stmt
	findAll     *sql.Stmt
	findAllByEP *sql.Stmt
	insert      *sql.Stmt
	update      *sql.Stmt
	delete      *sql.Stmt
}

// NewDB prepares all statements; prep.Err carries the first prepare failure.
func NewDB(ctx context.Context, db *sql.DB) (*DB, error) {
	prep := &util.Prepare{DB: db, Ctx: ctx}
	p := prep.P

	s := &DB{db: db}
	// $2 is the (nullable) user ID for the favorite flag.
	s.findOne = p(`
		SELECT
			s.id,
			s.name,
			s.description,
			s.escalation_policy_id,
			e.name,
			fav is distinct from null
		FROM
			services s
		JOIN escalation_policies e ON e.id = s.escalation_policy_id
		LEFT JOIN user_favorites fav ON s.id = fav.tgt_service_id AND fav.user_id = $2
		WHERE
			s.id = $1
	`)
	s.findOneUp = p(`
		SELECT
			s.id,
			s.name,
			s.description,
			s.escalation_policy_id
		FROM services s
		WHERE s.id = $1
		FOR UPDATE
	`)
	s.findMany = p(`
		SELECT
			s.id,
			s.name,
			s.description,
			s.escalation_policy_id,
			e.name,
			fav is distinct from null
		FROM
			services s
		JOIN escalation_policies e ON e.id = s.escalation_policy_id
		LEFT JOIN user_favorites fav ON s.id = fav.tgt_service_id AND fav.user_id = $2
		WHERE
			s.id = any($1)
	`)

	s.findAll = p(`
		SELECT
			s.id,
			s.name,
			s.description,
			s.escalation_policy_id,
			e.name,
			false
		FROM
			services s,
			escalation_policies e
		WHERE
			e.id = s.escalation_policy_id
	`)
	s.findAllByEP = p(`
		SELECT
			s.id,
			s.name,
			s.description,
			s.escalation_policy_id,
			e.name,
			false
		FROM
			services s,
			escalation_policies e
		WHERE
			e.id = $1 AND
			e.id = s.escalation_policy_id
	`)
	s.insert = p(`INSERT INTO services (id,name,description,escalation_policy_id) VALUES ($1,$2,$3,$4)`)
	s.update = p(`UPDATE services SET name = $2, description = $3, escalation_policy_id = $4 WHERE id = $1`)
	s.delete = p(`DELETE FROM services WHERE id = any($1)`)

	return s, prep.Err
}

// FindOneForUpdate fetches a service inside tx with a row lock (FOR UPDATE).
func (db *DB) FindOneForUpdate(ctx context.Context, tx *sql.Tx, id string) (*Service, error) {
	err := permission.LimitCheckAny(ctx, permission.User)
	if err != nil {
		return nil, err
	}
	err = validate.UUID("ServiceID", id)
	if err != nil {
		return nil, err
	}
	var s Service
	err = tx.StmtContext(ctx, db.findOneUp).QueryRowContext(ctx, id).Scan(&s.ID, &s.Name, &s.Description, &s.EscalationPolicyID)
	if err != nil {
		return nil, err
	}
	return &s, nil
}

// FindMany returns slice of Service objects given a slice of serviceIDs
func (db *DB) FindMany(ctx context.Context, ids []string) ([]Service, error) {
	err := permission.LimitCheckAny(ctx, permission.User)
	if err != nil {
		return nil, err
	}
	if len(ids) == 0 {
		return nil, nil
	}
	err = validate.ManyUUID("ServiceIDs", ids, 100)
	if err != nil {
		return nil, err
	}

	rows, err := db.findMany.QueryContext(ctx, pq.StringArray(ids), permission.UserID(ctx))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	return scanAllFrom(rows)
}

// Insert creates a service outside any transaction.
func (db *DB) Insert(ctx context.Context, s *Service) (*Service, error) {
	return db.CreateServiceTx(ctx, nil, s)
}

// CreateServiceTx validates s, assigns a fresh UUID, and inserts it
// (within tx when tx is non-nil).
func (db *DB) CreateServiceTx(ctx context.Context, tx *sql.Tx, s *Service) (*Service, error) {
	err := permission.LimitCheckAny(ctx, permission.Admin, permission.User)
	if err != nil {
		return nil, err
	}

	n, err := s.Normalize()
	if err != nil {
		return nil, err
	}

	n.ID = uuid.NewV4().String()
	stmt := db.insert
	if tx != nil {
		stmt = tx.Stmt(stmt)
	}
	_, err = stmt.ExecContext(ctx, n.ID, n.Name, n.Description,
		n.EscalationPolicyID)
	if err != nil {
		return nil, err
	}

	return n, nil
}

// Delete implements the ServiceInterface interface.
func (db *DB) Delete(ctx context.Context, id string) error {
	return db.DeleteTx(ctx, nil, id)
}

// DeleteTx deletes a single service within tx (nil tx allowed).
func (db *DB) DeleteTx(ctx context.Context, tx *sql.Tx, id string) error {
	return db.DeleteManyTx(ctx, tx, []string{id})
}

// DeleteManyTx deletes up to 50 services by ID in one statement.
func (db *DB) DeleteManyTx(ctx context.Context, tx *sql.Tx, ids []string) error {
	err := permission.LimitCheckAny(ctx, permission.Admin, permission.User)
	if err != nil {
		return err
	}
	err = validate.ManyUUID("ServiceID", ids, 50)
	if err != nil {
		return err
	}
	s := db.delete
	if tx != nil {
		s = tx.StmtContext(ctx, s)
	}
	_, err = s.ExecContext(ctx, pq.StringArray(ids))
	return err
}

// wrap rebinds a statement to tx when one is provided.
// NOTE(review): uses tx.Stmt (no ctx) while DeleteManyTx uses
// StmtContext — consider unifying; confirm intended.
func wrap(tx *sql.Tx, s *sql.Stmt) *sql.Stmt {
	if tx == nil {
		return s
	}
	return tx.Stmt(s)
}

// Update implements the ServiceStore interface.
func (db *DB) Update(ctx context.Context, s *Service) error {
	return db.UpdateTx(ctx, nil, s)
}

// UpdateTx validates then updates name/description/policy for s.ID.
func (db *DB) UpdateTx(ctx context.Context, tx *sql.Tx, s *Service) error {
	err := permission.LimitCheckAny(ctx, permission.Admin, permission.User)
	if err != nil {
		return err
	}

	n, err := s.Normalize()
	if err != nil {
		return err
	}

	err = validate.UUID("ServiceID", n.ID)
	if err != nil {
		return err
	}

	_, err = wrap(tx, db.update).ExecContext(ctx, n.ID, n.Name, n.Description, n.EscalationPolicyID)
	return err
}

// FindOneForUser fetches a service, flagging whether it is a favorite of
// userID; an empty userID yields isUserFavorite=false (NULL join key).
func (db *DB) FindOneForUser(ctx context.Context, userID, serviceID string) (*Service, error) {
	err := validate.UUID("ServiceID", serviceID)
	if err != nil {
		return nil, err
	}

	var uid sql.NullString
	userCheck := permission.User

	if userID != "" {
		err := validate.UUID("UserID", userID)
		if err != nil {
			return nil, err
		}
		userCheck = permission.MatchUser(userID)
		uid.Valid = true
		uid.String = userID
	}

	err = permission.LimitCheckAny(ctx, userCheck, permission.System)
	if err != nil {
		return nil, err
	}

	row := db.findOne.QueryRowContext(ctx, serviceID, uid)
	var s Service
	err = scanFrom(&s, row.Scan)
	if err != nil {
		return nil, err
	}

	return &s, nil
}

func (db *DB) FindOne(ctx context.Context, id string) (*Service, error) {
	// old method just calls new method
	return db.FindOneForUser(ctx, "", id)
}

// scanFrom maps one result row (6-column form) into s.
func scanFrom(s *Service, f func(args ...interface{}) error) error {
	return f(&s.ID, &s.Name, &s.Description, &s.EscalationPolicyID, &s.epName, &s.isUserFavorite)
}

// scanAllFrom drains rows into a slice; caller closes rows.
func scanAllFrom(rows *sql.Rows) (services []Service, err error) {
	var s Service
	for rows.Next() {
		err = scanFrom(&s, rows.Scan)
		if err != nil {
			return nil, err
		}
		services = append(services, s)
	}
	return services, nil
}

// FindAll returns every service (favorite flag always false).
func (db *DB) FindAll(ctx context.Context) ([]Service, error) {
	err := permission.LimitCheckAny(ctx, permission.Admin, permission.User)
	if err != nil {
		return nil, err
	}

	rows, err := db.findAll.QueryContext(ctx)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	return scanAllFrom(rows)
}

// FindAllByEP returns every service attached to the given escalation policy.
func (db *DB) FindAllByEP(ctx context.Context, epID string) ([]Service, error) {
	err := permission.LimitCheckAny(ctx, permission.Admin, permission.User)
	if err != nil {
		return nil, err
	}

	rows, err := db.findAllByEP.QueryContext(ctx, epID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	return scanAllFrom(rows)
}

# Notification Smoketest Suite

A suite of tests to check that notifications get sent properly.

## Setup

1. Ensure you have postgres running locally, the test suite will create timestamped databases while running.
1. Make sure you have the `goalert` binary installed. (run `make install` from the root of the repo)
1.
If you want codesigning (Mac OS, prevents the firewall popup) create a trusted cert named `localdev` (detailed steps are below).

## Running Tests

Run `make smoketest` from the root of the repo to run all tests.
The script will automatically rebuild goalert, including any change migrations.

To run a single test you can use `go test` from the smoketest directory.

## Creating Tests

- Try to keep the test under 5 minutes, if possible.
- Specify the current migration level when the test is created. This way your initial SQL won't break as migrations are applied in the future, and your test will ensure behavior against newer migrations at the same time.
- Make sure to call `t.Parallel()` and `defer h.Close()` in your test.

### Creating a Code Signing Cert (Mac OS Only)

1. Open `Keychain Access` (press Command+Space and start typing the name)
1. Click the menu `Keychain Access` -> `Certificate Assistant` -> `Create a Certificate ...`
1. Enter `localdev` as the name.
1. Set "Certificate Type" to `Code Signing`
1. Click continue, hit yes on the confirmation window.
1. Go to the `login` keychain and find the new cert.
1. Right-click (or control-click) and select `Get Info`.
1. Click the arrow next to `Trust`.
1. Next to "When using this certificate" select `Always Trust`.

package smoketest

import (
	"github.com/target/goalert/smoketest/harness"
	"testing"
	"time"
)

/*
# Add Rules Mid-Cycle

This tests for the following behavior when a notification rule is added during a policy.

1. Rules do not retro-actively trigger mid-cycle.
1. New rules take effect, if they occur in the future (mid-cycle).

## Procedure

1. Create 2 rules, 1 immediate, one after 2 minutes.
1. Check the immediate fired.
1. After 2 minutes, check the 2 minute rule fired.
1. Add a rule for 1 minute, and one for 3 minutes.
1. Check that the 1 minute didn't fire.
1. After another minute (3+ total) ensure the 3 minute rule fired.
1. Escalate the alert
1. Ensure the immediate fired.
1. After 1 minute, ensure the 1 minute rule fired.
*/
func TestAddRules(t *testing.T) {
	t.Parallel()

	// Fixture: one user with an immediate rule and a 2-minute rule, a
	// non-repeating escalation policy, and one open alert.
	sql := `
	insert into users (id, name, email)
	values
		({{uuid "uid"}}, 'bob', 'joe');

	insert into user_contact_methods (id, user_id, name, type, value)
	values
		({{uuid "cid"}}, {{uuid "uid"}}, 'personal', 'SMS', {{phone "1"}});

	insert into user_notification_rules (user_id, contact_method_id, delay_minutes)
	values
		({{uuid "uid"}}, {{uuid "cid"}}, 0),
		({{uuid "uid"}}, {{uuid "cid"}}, 2);

	insert into escalation_policies (id, name, repeat)
	values
		({{uuid "eid"}}, 'esc policy', -1);
	insert into escalation_policy_steps (id, escalation_policy_id, delay)
	values
		({{uuid "esid"}}, {{uuid "eid"}}, 60);

	insert into escalation_policy_actions (escalation_policy_step_id, user_id)
	values
		({{uuid "esid"}}, {{uuid "uid"}});

	insert into services (id, escalation_policy_id, name)
	values
		({{uuid "sid"}}, {{uuid "eid"}}, 'service');

	insert into alerts (service_id, description)
	values
		({{uuid "sid"}}, 'testing');

`
	h := harness.NewHarness(t, sql, "ids-to-uuids")
	defer h.Close()

	tw := h.Twilio()
	d := tw.Device(h.Phone("1"))
	d.ExpectSMS("testing")
	tw.WaitAndAssert()

	h.FastForward(time.Minute)

	// ADD RULES
	h.AddNotificationRule(h.UUID("uid"), h.UUID("cid"), 1)
	h.AddNotificationRule(h.UUID("uid"), h.UUID("cid"), 3)

	h.FastForward(time.Minute)

	d.ExpectSMS("testing")
	tw.WaitAndAssert()

	h.FastForward(time.Minute)

	d.ExpectSMS("testing")
	tw.WaitAndAssert()

	h.Escalate(1, 0)

	d.ExpectSMS("testing")
	tw.WaitAndAssert()

	h.FastForward(time.Minute)

	d.ExpectSMS("testing")
}
package smoketest

import (
	"github.com/target/goalert/smoketest/harness"
	"testing"
	"time"
)

// TestDedupNotifications tests that if a single contact method is
// used multiple times in a user's notification rules and if engine
// experiences a disruption and resumes after the notification rule delay,
// that only a single notification is generated.
func TestDedupNotifications(t *testing.T) {
	t.Parallel()

	// Fixture: same contact method on 1- and 2-minute rules.
	sql := `
	insert into users (id, name, email)
	values
		({{uuid "user"}}, 'bob', 'joe');
	insert into user_contact_methods (id, user_id, name, type, value)
	values
		({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}});

	insert into user_notification_rules (user_id, contact_method_id, delay_minutes)
	values
		({{uuid "user"}}, {{uuid "cm1"}}, 1),
		({{uuid "user"}}, {{uuid "cm1"}}, 2);

	insert into escalation_policies (id, name)
	values
		({{uuid "eid"}}, 'esc policy');
	insert into escalation_policy_steps (id, escalation_policy_id)
	values
		({{uuid "esid"}}, {{uuid "eid"}});
	insert into escalation_policy_actions (escalation_policy_step_id, user_id)
	values
		({{uuid "esid"}}, {{uuid "user"}});

	insert into services (id, escalation_policy_id, name)
	values
		({{uuid "sid"}}, {{uuid "eid"}}, 'service');

	insert into alerts (service_id, description)
	values
		({{uuid "sid"}}, 'testing');

`

	h := harness.NewHarness(t, sql, "escalation-policy-step-reorder")
	defer h.Close()

	// Simulate an engine disruption longer than the rule delays.
	h.Delay(time.Second * 15)

	//Test that after 3 minutes, only 1 notification is generated
	h.FastForward(time.Minute * 3)

	h.Twilio().Device(h.Phone("1")).ExpectSMS("testing")
}

package smoketest

import (
	"fmt"
	"github.com/target/goalert/smoketest/harness"
	"testing"
)

// TestDeleteEscalationPolicy tests that it is possible to delete an escalation policy
func TestDeleteEscalationPolicy(t *testing.T) {
	t.Parallel()

	const sql = `
	insert into escalation_policies (id, name, description)
	values
		({{uuid "ep1"}}, 'test', 'test');

	insert into escalation_policy_steps (id, escalation_policy_id)
	values
		({{uuid ""}}, {{uuid "ep1"}}),
		({{uuid ""}}, {{uuid "ep1"}});
`

	h := harness.NewHarness(t, sql, "heartbeats")
	defer h.Close()

	// doQL runs a GraphQL mutation and fails the test on any returned error.
	doQL := func(query string) {
		g := h.GraphQLQuery(query)
		for _, err := range g.Errors {
			t.Error("GraphQL Error:", err.Message)
		}
		if len(g.Errors) > 0 {
			t.Fatal("errors returned from GraphQL")
		}
		t.Log("Response:", string(g.Data))
	}

	doQL(fmt.Sprintf(`
		mutation {
			deleteEscalationPolicy(input:{id: "%s"}) {
				deleted_id
			}
		}
	`, h.UUID("ep1")))
}

package smoketest

import (
	"fmt"
	"github.com/target/goalert/smoketest/harness"
	"testing"
)

// TestDeleteRotation tests that it is possible to delete a rotation with participants
func TestDeleteRotation(t *testing.T) {
	t.Parallel()

	const sql = `
	insert into users (id, name, email, role)
	values
		({{uuid "u1"}}, 'bob', 'joe', 'user'),
		({{uuid "u2"}}, 'ben', 'josh', 'user');

	insert into rotations (id, name, description, type, start_time, time_zone)
	values
		({{uuid "r1"}}, 'test', 'test', 'daily', now(), 'UTC');

	insert into rotation_participants (id, rotation_id, user_id, position)
	values
		({{uuid ""}}, {{uuid "r1"}}, {{uuid "u1"}},0),
		({{uuid ""}}, {{uuid "r1"}}, {{uuid
"u2"}},1); +` + + h := harness.NewHarness(t, sql, "heartbeats") + defer h.Close() + + doQL := func(query string) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + t.Log("Response:", string(g.Data)) + } + + doQL(fmt.Sprintf(` + mutation { + deleteRotation(input:{id: "%s"}) { + deleted_id + } + } + `, h.UUID("r1"))) +} diff --git a/smoketest/escalation_test.go b/smoketest/escalation_test.go new file mode 100644 index 0000000000..d366f3f666 --- /dev/null +++ b/smoketest/escalation_test.go @@ -0,0 +1,61 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestEscalation tests that alerts are escalated automatically, per the delay_minutes setting. +func TestEscalation(t *testing.T) { + t.Parallel() + + const sql = ` +insert into users (id, name, email) +values + ({{uuid "user"}}, 'bob', 'joe'), + ({{uuid "user2"}}, 'bob2', 'joe2'); + +insert into user_contact_methods (id, user_id, name, type, value) +values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "user2"}}, 'personal', 'SMS', {{phone "2"}}); + +insert into user_notification_rules (user_id, contact_method_id, delay_minutes) +values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user2"}}, {{uuid "cm2"}}, 0); + +insert into escalation_policies (id, name) +values + ({{uuid "eid"}}, 'esc policy'); + +insert into escalation_policy_steps (id, escalation_policy_id, delay) +values + ({{uuid "es1"}}, {{uuid "eid"}}, 1), + ({{uuid "es2"}}, {{uuid "eid"}}, 60); + +insert into escalation_policy_actions (escalation_policy_step_id, user_id) +values + ({{uuid "es1"}}, {{uuid "user"}}), + ({{uuid "es2"}}, {{uuid "user2"}}); + +insert into services (id, escalation_policy_id, name) +values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + +insert into alerts (service_id, description) 
+values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + d := h.Twilio().Device(h.Phone("1")) + d.ExpectSMS("testing") + h.Twilio().WaitAndAssert() + + h.FastForward(time.Minute) + h.Twilio().Device(h.Phone("2")).ExpectSMS("testing") +} diff --git a/smoketest/escalationgap_test.go b/smoketest/escalationgap_test.go new file mode 100644 index 0000000000..4ea853dc22 --- /dev/null +++ b/smoketest/escalationgap_test.go @@ -0,0 +1,47 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestEscalationGap tests that escalation policy step discrepencies are handled. +func TestEscalationGap(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id, step_number) + values + ({{uuid "esid"}}, {{uuid "eid"}}, 1); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "add-verification-code") + defer h.Close() + + h.Twilio().Device(h.Phone("1")).ExpectSMS("testing") +} diff --git a/smoketest/escalationnotification_test.go b/smoketest/escalationnotification_test.go new file mode 100644 index 0000000000..c586439822 --- /dev/null +++ 
b/smoketest/escalationnotification_test.go @@ -0,0 +1,78 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestEscalationNotification ensures that notification rules +// don't repeat during an escalation step, and continue to completion. +func TestEscalationNotification(t *testing.T) { + t.Parallel() + sql := ` + insert into users (id, name, email) + values + ({{uuid "uid"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "c1"}}, {{uuid "uid"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "c2"}}, {{uuid "uid"}}, 'personal', 'VOICE', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "uid"}}, {{uuid "c1"}}, 0), + ({{uuid "uid"}}, {{uuid "c2"}}, 0), + ({{uuid "uid"}}, {{uuid "c1"}}, 1); + + insert into escalation_policies (id, name, repeat) + values + ({{uuid "eid"}}, 'esc policy', -1); + insert into escalation_policy_steps (id, escalation_policy_id, delay) + values + ({{uuid "esid"}}, {{uuid "eid"}}, 60); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "uid"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); +` + + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + d2 := tw.Device(h.Phone("2")) + + d1.ExpectSMS("testing") + d2.ExpectVoice("testing") + tw.WaitAndAssert() + + h.Escalate(1, 0) // results in the start of a 2nd cycle + + d1.ExpectSMS("testing") + d2.ExpectVoice("testing") + tw.WaitAndAssert() + + h.FastForward(2 * time.Minute) // ensure both rules have elapsed + + // 1 sms from the first step, 1 from the escalated one + d1.ExpectSMS("testing") + d1.ExpectSMS("testing") + 
tw.WaitAndAssert() + + h.Escalate(1, 1) + d1.ExpectSMS("testing") + d2.ExpectVoice("testing") + tw.WaitAndAssert() + + h.FastForward(2 * time.Minute) + d1.ExpectSMS("testing") +} diff --git a/smoketest/genericapi_test.go b/smoketest/genericapi_test.go new file mode 100644 index 0000000000..eb6583d16e --- /dev/null +++ b/smoketest/genericapi_test.go @@ -0,0 +1,64 @@ +package smoketest + +import ( + "bytes" + "github.com/target/goalert/smoketest/harness" + "net/http" + "net/url" + "testing" +) + +func TestGenericAPI(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into integration_keys (id, type, name, service_id) + values + ({{uuid "int_key"}}, 'generic', 'my key', {{uuid "sid"}}); +` + h := harness.NewHarness(t, sql, "add-generic-integration-key") + defer h.Close() + + u := h.URL() + "/v1/api/alerts?key=" + h.UUID("int_key") + v := make(url.Values) + v.Set("summary", "hello") + v.Set("details", "woot") + + resp, err := http.Post(u, "application/x-www-form-urlencoded", bytes.NewBufferString(v.Encode())) + if err != nil { + t.Fatal("post to generic endpoint failed:", err) + } else if resp.StatusCode/100 != 2 { + t.Error("non-2xx response:", resp.Status) + } + 
resp.Body.Close() + + h.Twilio().Device(h.Phone("1")).ExpectSMS("hello") +} diff --git a/smoketest/genericapiclose_test.go b/smoketest/genericapiclose_test.go new file mode 100644 index 0000000000..1e2dd5f7d4 --- /dev/null +++ b/smoketest/genericapiclose_test.go @@ -0,0 +1,91 @@ +package smoketest + +import ( + "bytes" + "github.com/target/goalert/smoketest/harness" + "net/http" + "net/url" + "testing" + "time" +) + +func TestGenericAPIClose(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm1"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into integration_keys (id, type, name, service_id) + values + ({{uuid "int_key"}}, 'generic', 'my key', {{uuid "sid"}}); +` + h := harness.NewHarness(t, sql, "add-generic-integration-key") + defer h.Close() + + fire := func(key, summary, dedup string, close bool) { + u := h.URL() + "/v1/api/alerts?key=" + key + v := make(url.Values) + v.Set("summary", summary) + if dedup != "" { + v.Set("dedup", dedup) + } + if close { + v.Set("action", "close") + } + + resp, err := http.Post(u, "application/x-www-form-urlencoded", bytes.NewBufferString(v.Encode())) + if err != nil { + t.Fatal("post to generic endpoint failed:", err) + } else if 
resp.StatusCode/100 != 2 { + t.Error("non-2xx response:", resp.Status) + } + resp.Body.Close() + } + + key := h.UUID("int_key") + fire(key, "test1", "", false) + fire(key, "test2", "", false) + fire(key, "test3", "dedup", false) + fire(key, "test4", "", true) // should not open one in the first place + + d := h.Twilio().Device(h.Phone("1")) + + d.ExpectSMS("test1") + d.ExpectSMS("test2") + d.ExpectSMS("test3") + h.Twilio().WaitAndAssert() + + fire(key, "test2", "", true) + fire(key, "test3", "dedup", true) + + h.FastForward(time.Minute) + + d.ExpectSMS("test1") +} diff --git a/smoketest/genericapidedup_test.go b/smoketest/genericapidedup_test.go new file mode 100644 index 0000000000..a345f37c0f --- /dev/null +++ b/smoketest/genericapidedup_test.go @@ -0,0 +1,105 @@ +package smoketest + +import ( + "bytes" + "github.com/target/goalert/smoketest/harness" + "net/http" + "net/url" + "testing" +) + +func TestGenericAPIDedup(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "u1"}}, 'bob', 'joe'), + ({{uuid "u2"}}, 'bob2', 'joe2'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "u1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "u2"}}, 'personal', 'SMS', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "u1"}}, {{uuid "cm1"}}, 0), + ({{uuid "u2"}}, {{uuid "cm2"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "e1"}}, 'esc policy1'), + ({{uuid "e2"}}, 'esc policy2'); + + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "e1s1"}}, {{uuid "e1"}}), + ({{uuid "e2s1"}}, {{uuid "e2"}}); + + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "e1s1"}}, {{uuid "u1"}}), + ({{uuid "e2s1"}}, {{uuid "u2"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid 
"s1"}}, {{uuid "e1"}}, 'service1'), + ({{uuid "s2"}}, {{uuid "e2"}}, 'service2'); + + insert into integration_keys (id, type, name, service_id) + values + ({{uuid "i1"}}, 'generic', 'my key', {{uuid "s1"}}), + ({{uuid "i2"}}, 'generic', 'my key', {{uuid "s2"}}); + + insert into alerts (source, service_id, description) + values + ('generic', {{uuid "s1"}}, 'pre-existing'); +` + h := harness.NewHarness(t, sql, "add-generic-integration-key") + defer h.Close() + + fire := func(key, summary, dedup string) { + u := h.URL() + "/v1/api/alerts?key=" + key + v := make(url.Values) + v.Set("summary", summary) + if dedup != "" { + v.Set("dedup", dedup) + } + + resp, err := http.Post(u, "application/x-www-form-urlencoded", bytes.NewBufferString(v.Encode())) + if err != nil { + t.Fatal("post to generic endpoint failed:", err) + } else if resp.StatusCode/100 != 2 { + t.Error("non-2xx response:", resp.Status) + } + resp.Body.Close() + } + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + d2 := tw.Device(h.Phone("2")) + + d1.ExpectSMS("pre-existing") // already created + tw.WaitAndAssert() + + fire(h.UUID("i1"), "pre-existing", "") // should get deduped and never notify + + fire(h.UUID("i1"), "hello", "") + fire(h.UUID("i1"), "hello", "") // 1 alert + d1.ExpectSMS("hello") + tw.WaitAndAssert() + + fire(h.UUID("i1"), "goodbye", "") + fire(h.UUID("i2"), "hello", "") + d1.ExpectSMS("goodbye") + d2.ExpectSMS("hello") // ensure 2nd service can get an alert + tw.WaitAndAssert() + + fire(h.UUID("i1"), "hello", "foo") + fire(h.UUID("i1"), "hello2", "foo") + d1.ExpectSMS("hello") + + fire(h.UUID("i2"), "hello", "foo") + d2.ExpectSMS("hello") + tw.WaitAndAssert() +} diff --git a/smoketest/grafana_test.go b/smoketest/grafana_test.go new file mode 100644 index 0000000000..014cb17915 --- /dev/null +++ b/smoketest/grafana_test.go @@ -0,0 +1,69 @@ +package smoketest + +import ( + "bytes" + "github.com/target/goalert/smoketest/harness" + "net/http" + "testing" +) + +func TestGrafana(t 
*testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into integration_keys (id, type, name, service_id) + values + ({{uuid "int_key"}}, 'grafana', 'my key', {{uuid "sid"}}); +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + url := h.URL() + "/v1/webhooks/grafana?integration_key=" + h.UUID("int_key") + + resp, err := http.Post(url, "application/json", bytes.NewBufferString(` + { + "ruleName": "bob", + "ruleId": 1, + "message": "test", + "state": "alerting", + "title": "woot", + "ruleUrl": "dontcare" + } + `)) + if err != nil { + t.Fatal("post to grafana endpoint failed:", err) + } else if resp.StatusCode != 200 { + t.Error("non-200 response:", resp.Status) + } + resp.Body.Close() + + h.Twilio().Device(h.Phone("1")).ExpectSMS("bob") +} diff --git a/smoketest/graphql2users_test.go b/smoketest/graphql2users_test.go new file mode 100644 index 0000000000..ea7dd80791 --- /dev/null +++ b/smoketest/graphql2users_test.go @@ -0,0 +1,103 @@ +package smoketest + +import ( + "encoding/json" + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestGraphQL2Users tests most operations on users API via GraphQL2 endpoint. 
+func TestGraphQL2Users(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email, role) + values + ({{uuid "user"}}, 'bob', 'joe', 'admin'); +` + + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + doQL := func(t *testing.T, query string, res interface{}) { + g := h.GraphQLQuery2(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + t.Log("Response:", string(g.Data)) + if res == nil { + return + } + err := json.Unmarshal(g.Data, &res) + if err != nil { + t.Fatal("failed to parse response:", err) + } + } + + doQL(t, fmt.Sprintf(` + mutation { + addAuthSubject(input: { + userID: "%s", + providerID: "%s", + subjectID: "%s", + }) + } + `, h.UUID("user"), "provider1", "subject1"), nil) + + var a struct { + Nodes struct { + ProviderID string + SubjectID string + UserID string + } + } + + doQL(t, fmt.Sprintf(` + query { + authSubjectsForProvider(providerID: "%s") { + nodes { + providerID + subjectID + userID + } + } + } + `, "provider1"), &a) + + if a.Nodes.UserID == h.UUID("user") { + if a.Nodes.SubjectID != "subject1" { + t.Fatalf("ERROR: retrieved subjectID=%s; want %s", a.Nodes.SubjectID, "subject1") + } + } + + doQL(t, fmt.Sprintf(` + mutation { + deleteAuthSubject(input: { + userID: "%s", + providerID: "%s", + subjectID: "%s", + }) + } + `, h.UUID("user"), "provider1", "subject1"), nil) + + // After deleting provider, no providers should exist + doQL(t, fmt.Sprintf(` + query { + authSubjectsForProvider(providerID: "%s") { + nodes { + providerID + subjectID + userID + } + } + } + `, "provider1"), &a) + + if len(a.Nodes.ProviderID) != 0 { + t.Fatalf("ERROR: retrieved Nodes=%s; want nil", a.Nodes.ProviderID) + } +} diff --git a/smoketest/graphqlalert_test.go b/smoketest/graphqlalert_test.go new file mode 100644 index 0000000000..e7a06cfcd9 --- /dev/null +++ b/smoketest/graphqlalert_test.go @@ -0,0 +1,204 @@ +package 
smoketest + +import ( + "encoding/json" + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestGraphQLAlert tests that all steps up to, and including, generating +// an alert via GraphQL result in notifications going out. +// +// Specifically, mutations tested include: +// - createContactMethod +// - createNotificationRule +// - createSchedule +// - updateSchedule +// - addRotationParticipant +// - createOrUpdateEscalationPolicy +// - createOrUpdateEscalationPolicyStep +// - createService +// - createAlert +func TestGraphQLAlert(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "u1"}}, 'bob', 'joe'), + ({{uuid "u2"}}, 'ben', 'josh'); +` + loc, err := time.LoadLocation("America/Chicago") + if err != nil { + t.Fatal("failed to load America/Chicago tzdata:", err) + } + + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + doQL := func(query string, res interface{}) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + t.Log("Response:", string(g.Data)) + if res == nil { + return + } + err := json.Unmarshal(g.Data, &res) + if err != nil { + t.Fatal("failed to parse response:", err) + } + } + + uid1, uid2 := h.UUID("u1"), h.UUID("u2") + phone1, phone2 := h.Phone("u1"), h.Phone("u2") + + var cm1, cm2 struct{ CreateContactMethod struct{ ID string } } + doQL(fmt.Sprintf(` + mutation { + createContactMethod(input:{ + user_id: "%s", + name: "default", + type: SMS, + value: "%s" + }) { + id + } + } + `, uid1, phone1), &cm1) + doQL(fmt.Sprintf(` + mutation { + createContactMethod(input:{ + user_id: "%s", + name: "default", + type: SMS, + value: "%s" + }) { + id + } + } + `, uid2, phone2), &cm2) + + doQL(fmt.Sprintf(` + mutation { + createNotificationRule(input:{ + user_id: "%s" + contact_method_id: "%s", + delay_minutes: 0 + }){ + id + } + } 
+ + `, uid1, cm1.CreateContactMethod.ID), nil) + + doQL(fmt.Sprintf(` + mutation { + createNotificationRule(input:{ + user_id: "%s" + contact_method_id: "%s", + delay_minutes: 0 + }){ + id + } + } + + `, uid2, cm2.CreateContactMethod.ID), nil) + + var sched struct { + CreateSchedule struct { + ID string + Rotations []struct{ ID string } + } + } + + doQL(fmt.Sprintf(` + mutation { + createSchedule(input:{ + name: "default", + description: "default testing", + time_zone: "America/Chicago", + default_rotation: { + type: daily, + start_time: "%s", + shift_length:1, + } + }){ + id + rotations { + id + } + } + } + + `, time.Now().Add(-time.Hour).In(loc).Format(time.RFC3339)), &sched) + + if len(sched.CreateSchedule.Rotations) != 1 { + t.Fatal("createSchedule did not create (or did not return) default rotation") + } + rotID := sched.CreateSchedule.Rotations[0].ID + + doQL(fmt.Sprintf(` + mutation { + addRotationParticipant(input:{ + user_id: "%s", + rotation_id: "%s" + }) {id} + } + + `, uid1, rotID), nil) + + var esc struct{ CreateOrUpdateEscalationPolicy struct{ ID string } } + doQL(` + mutation { + createOrUpdateEscalationPolicy(input:{ + repeat: 0, + name: "default" + }){id} + } + `, &esc) + + var step struct { + CreateOrUpdateEscalationPolicyStep struct{ Step struct{ ID string } } + } + doQL(fmt.Sprintf(` + mutation { + createOrUpdateEscalationPolicyStep(input:{ + delay_minutes: 60, + escalation_policy_id: "%s", + user_ids: ["%s"], + schedule_ids: ["%s"] + }){ + step: escalation_policy_step {id} + } + } + `, esc.CreateOrUpdateEscalationPolicy.ID, uid2, sched.CreateSchedule.ID), &step) + var svc struct{ CreateService struct{ ID string } } + doQL(fmt.Sprintf(` + mutation { + createService(input:{ + name: "default", + escalation_policy_id: "%s" + }){id} + } + `, esc.CreateOrUpdateEscalationPolicy.ID), &svc) + + // finally.. 
we can create the alert + doQL(fmt.Sprintf(` + mutation { + createAlert(input:{ + description: "brok", + service_id: "%s" + }){id} + } + `, svc.CreateService.ID), nil) + + h.Twilio().Device(phone1).ExpectSMS() + h.Twilio().Device(phone2).ExpectSMS() +} diff --git a/smoketest/graphqlcreateschedule_test.go b/smoketest/graphqlcreateschedule_test.go new file mode 100644 index 0000000000..b2baaf31bb --- /dev/null +++ b/smoketest/graphqlcreateschedule_test.go @@ -0,0 +1,87 @@ +package smoketest + +import ( + "encoding/json" + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestGraphQLCreateSchedule tests that all steps for creating a schedule (without default rotation) are carried out without any errors. +func TestGraphQLCreateSchedule(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "u1"}}, 'bob', 'joe'), + ({{uuid "u2"}}, 'ben', 'josh'); +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + doQL := func(query string, res interface{}) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + t.Log("Response:", string(g.Data)) + if res == nil { + return + } + err := json.Unmarshal(g.Data, &res) + if err != nil { + t.Fatal("failed to parse response:", err) + } + } + + var sched struct { + CreateSchedule struct { + ID string + Rotations []struct{ ID string } + } + } + + doQL(fmt.Sprintf(` + mutation { + createSchedule(input:{ + name: "default_testing", + description: "default testing", + time_zone: "America/Chicago", + }){ + id + rotations { + id + } + } + } + `), &sched) + + sID := sched.CreateSchedule.ID + t.Log("Created Schedule ID :", sID) + + var newSched struct { + Schedule struct { + Rotations []struct{} + } + } + doQL(fmt.Sprintf(` + query { + schedule(id: "%s") { + rotations { + id + } + } + } + + `, sID), &newSched) + + t.Log("Number 
of rotations:", newSched) + + if len(newSched.Schedule.Rotations) != 0 { + t.Errorf("got %d rotations; want 0", len(newSched.Schedule.Rotations)) + } +} diff --git a/smoketest/graphqlcreatescheduledefaultrotation_test.go b/smoketest/graphqlcreatescheduledefaultrotation_test.go new file mode 100644 index 0000000000..8db9a0af07 --- /dev/null +++ b/smoketest/graphqlcreatescheduledefaultrotation_test.go @@ -0,0 +1,94 @@ +package smoketest + +import ( + "encoding/json" + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestGraphQLCreateScheduleWithDefaultRotation tests that all steps for creating a schedule with default rotation are carried out without any errors. +func TestGraphQLCreateScheduleWithDefaultRotation(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "u1"}}, 'bob', 'joe'), + ({{uuid "u2"}}, 'ben', 'josh'); +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + doQL := func(query string, res interface{}) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + t.Log("Response:", string(g.Data)) + if res == nil { + return + } + err := json.Unmarshal(g.Data, &res) + if err != nil { + t.Fatal("failed to parse response:", err) + } + } + + var sched struct { + CreateSchedule struct { + ID string + Rotations []struct{ ID string } + } + } + + doQL(fmt.Sprintf(` + mutation { + createSchedule(input:{ + name: "default_testing", + description: "default testing", + time_zone: "America/Chicago", + default_rotation: { + type: daily, + start_time: "%s", + shift_length:1, + } + }){ + id + rotations { + id + } + } + } + + `, time.Now().Format(time.RFC3339)), &sched) + + sID := sched.CreateSchedule.ID + t.Log("Created Schedule ID :", sID) + + var newSched struct { + Schedule struct { + Rotations []struct{} + } + } + doQL(fmt.Sprintf(` + 
query { + schedule(id: "%s") { + rotations { + id + } + } + } + + `, sID), &newSched) + + t.Log("Number of rotations:", newSched) + + if len(newSched.Schedule.Rotations) != 1 { + t.Errorf("got %d rotations; want 1", len(newSched.Schedule.Rotations)) + } +} diff --git a/smoketest/graphqlmultiplealerts_test.go b/smoketest/graphqlmultiplealerts_test.go new file mode 100644 index 0000000000..03a810eb8e --- /dev/null +++ b/smoketest/graphqlmultiplealerts_test.go @@ -0,0 +1,135 @@ +package smoketest + +import ( + "encoding/json" + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestGraphQLMultipleAlerts tests that all steps up to, and including, generating +// alerts and updating their statuses via GraphQL. +func TestGraphQLMultipleAlerts(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email, role) + values + ({{uuid "user"}}, 'bob', 'joe', 'user'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm1"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); +` + + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + phone := h.Phone("1") + sid := h.UUID("sid") + + // Creating alerts + h.CreateAlert(sid, "alert1") + h.CreateAlert(sid, "alert2") + + // Expect 2 SMS for 2 unacknowledged alerts + h.Twilio().Device(phone).ExpectSMS("alert1") + 
h.Twilio().Device(phone).ExpectSMS("alert2") + + // No more SMS should be sent out + h.Twilio().WaitAndAssert() + + h.CreateAlert(sid, "alert3") + + // GraphQL2 section starts + doQL2 := func(query string, res interface{}) { + g := h.GraphQLQuery2(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + t.Log("Response:", string(g.Data)) + if res == nil { + return + } + err := json.Unmarshal(g.Data, &res) + if err != nil { + t.Fatal("failed to parse response:", err) + } + } + + h.Twilio().Device(phone).ExpectSMS("alert3") + + // No more SMS should be sent out + h.Twilio().WaitAndAssert() + + // Acknowledging alert #3 + doQL2(fmt.Sprintf(` + mutation { + updateAlerts(input: { + alertIDs: [%d], + newStatus: StatusAcknowledged, + }){alertID} + } + `, 3), nil) + + h.FastForward(1 * time.Minute) + // Expect 2 SMS for 2 unacknowledged alerts + h.Twilio().Device(phone).ExpectSMS("alert1") + h.Twilio().Device(phone).ExpectSMS("alert2") + + // No SMS should be sent out + h.Twilio().WaitAndAssert() + + // Escalating multiple (3) alerts + doQL2(fmt.Sprintf(` + mutation { + escalateAlerts(input: [%d, %d, %d], + ){alertID} + } + `, 1, 2, 3), nil) + + // Expect 3 SMS for 3 escalated alerts + h.Twilio().Device(phone).ExpectSMS("alert1") + h.Twilio().Device(phone).ExpectSMS("alert2") + h.Twilio().Device(phone).ExpectSMS("alert3") + + h.Twilio().WaitAndAssert() + + // Closing multiple (3) alerts + doQL2(fmt.Sprintf(` + mutation { + updateAlerts(input: { + alertIDs: [%d, %d, %d], + newStatus: StatusClosed, + }){alertID} + } + `, 1, 2, 3), nil) + + h.FastForward(1 * time.Minute) + + // No more messages should be sent out + h.Twilio().WaitAndAssert() + +} diff --git a/smoketest/graphqloncall_test.go b/smoketest/graphqloncall_test.go new file mode 100644 index 0000000000..6c3bfe0cf3 --- /dev/null +++ b/smoketest/graphqloncall_test.go @@ -0,0 +1,188 @@ +package smoketest + +import ( + 
"encoding/json" + "fmt" + "github.com/target/goalert/smoketest/harness" + "strings" + "testing" +) + +// TestGraphQLOnCall tests the logic behind `User.is_on_call`. +func TestGraphQLOnCall(t *testing.T) { + t.Parallel() + + h := harness.NewHarness(t, "", "escalation-policy-step-reorder") + defer h.Close() + + doQL := func(t *testing.T, query string, res interface{}) { + g := h.GraphQLQueryT(t, query, "/v1/graphql") + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + t.Log("Response:", string(g.Data)) + + if res == nil { + return + } + err := json.Unmarshal(g.Data, &res) + if err != nil { + t.Fatal(err) + } + } + + var idCounter int + + check := func(name, input string, user1OnCall, user2OnCall bool) { + u1 := h.CreateUser() + u2 := h.CreateUser() + input = strings.Replace(input, "u1", u1.ID, -1) + input = strings.Replace(input, "u2", u2.ID, -1) + input = strings.Replace(input, "generated", fmt.Sprintf("generated%d", idCounter), -1) + idCounter++ + query := fmt.Sprintf(` + mutation { + createAll(input:{ + %s + }) { + services {id} + escalation_policies {id} + rotations {id} + user_overrides {id} + schedules {id} + } + } + `, input) + t.Run(name, func(t *testing.T) { + + var resp struct { + CreateAll map[string][]struct{ ID string } + } + doQL(t, query, &resp) + h.Trigger() + + var onCall struct { + User struct { + IsOnCall bool `json:"on_call"` + } + } + + doQL(t, fmt.Sprintf(` + query { + user(id: "%s") { on_call } + + } + `, u1.ID), &onCall) + + if user1OnCall != onCall.User.IsOnCall { + t.Fatalf("ERROR: User1 On-Call=%t; want %t", onCall.User.IsOnCall, user1OnCall) + } + + doQL(t, fmt.Sprintf(` + query { + user(id: "%s") { on_call } + + } + `, u2.ID), &onCall) + + if user2OnCall != onCall.User.IsOnCall { + t.Fatalf("ERROR: User2 On-Call=%t; want %t", onCall.User.IsOnCall, user2OnCall) + } + + }) + } + + // Randomly generate names, instead of hard-coding + // User 
directly on EP is always on call + check("User EP Direct", ` + escalation_policies: [{ id_placeholder: "ep", name: "generated", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: user, target_id: "u1" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generated", escalation_policy_id: "ep"}] + `, true, false) + + // Active participant directly on EP is always on call + check("User EP Rotation Direct", ` + escalation_policies: [{ id_placeholder: "ep", name: "generated", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: rotation, target_id: "rot" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generated", escalation_policy_id: "ep"}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generated", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u1"}] + `, true, false) + + // EP -> Schedule, where there is an active ADD for a user + check("User EP Schedule Add Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generated", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generated", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generated", description: "1"}] + user_overrides: [{add_user_id: "u1", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, true, false) + + // Active schedule rule, user is replaced + check("User EP Schedule Replace Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generated", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: 
[{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generated", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generated", description: "1"}] + schedule_rules: [{target:{target_type:user, target_id:"u2"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + user_overrides: [{add_user_id: "u1", remove_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, true, false) + + // Same scenario, user is NOT replaced (no override) + check("User EP Schedule Replace Override Absent", ` + escalation_policies: [{ id_placeholder: "ep", name: "generated", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generated", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generated", description: "1"}] + schedule_rules: [{target:{target_type:user, target_id:"u2"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + `, false, true) + + // Active schedule rule, active rotation participant is replaced + check("User EP Schedule Replace Rotation Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generated", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generated", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generated", description: "1"}] + schedule_rules: [{target:{target_type:rotation, 
target_id:"rot"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generated", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u2"}] + user_overrides: [{add_user_id: "u1", remove_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, true, false) + + // Active schedule rule, active rotation participant is NOT replaced (no override) + check("User EP Schedule Replace Rotation Override Absent", ` + escalation_policies: [{ id_placeholder: "ep", name: "generated", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generated", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generated", description: "1"}] + schedule_rules: [{target:{target_type:rotation, target_id:"rot"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generated", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u2"}] + `, false, true) + + // Active schedule rule, active rotation participant is removed + check("User EP Schedule Remove Rotation Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generated", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: 
"generated", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generated", description: "1"}] + schedule_rules: [{target:{target_type:rotation, target_id:"rot"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generated", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u2"}] + user_overrides: [{ remove_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, false, false) + + // Active schedule rule, user is removed + check("User EP Schedule Remove Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generated", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generated", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generated", description: "1"}] + schedule_rules: [{target:{target_type:user, target_id:"u2"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + user_overrides: [{remove_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, false, false) + +} diff --git a/smoketest/graphqloncallassignments_test.go b/smoketest/graphqloncallassignments_test.go new file mode 100644 index 0000000000..15a73f58e4 --- /dev/null +++ b/smoketest/graphqloncallassignments_test.go @@ -0,0 +1,490 @@ +package smoketest + +import ( + "encoding/json" + "fmt" + "github.com/target/goalert/engine/resolver" + 
"github.com/target/goalert/smoketest/harness" + "strings" + "testing" +) + +// TestGraphQLOnCallAssignments tests the logic behind `User.is_on_call`. +func TestGraphQLOnCallAssignments(t *testing.T) { + t.Parallel() + + h := harness.NewHarness(t, "", "escalation-policy-step-reorder") + defer h.Close() + + doQL := func(t *testing.T, silent bool, query string, res interface{}) { + g := h.GraphQLQueryT(t, query, "/v1/graphql") + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + if !silent { + t.Log("Response:", string(g.Data)) + } + + if res == nil { + return + } + err := json.Unmarshal(g.Data, &res) + if err != nil { + t.Fatal(err) + } + } + + type asnID struct { + Svc, EP, Rot, Sched string + Step int + } + + getID := func(a resolver.OnCallAssignment) asnID { + return asnID{ + Svc: a.ServiceName, + EP: a.EPName, + Rot: a.RotationName, + Sched: a.ScheduleName, + Step: a.Level, + } + } + + var idCounter int + check := func(name, input string, user1OnCall, user2OnCall []resolver.OnCallAssignment) { + u1 := h.CreateUser() + u2 := h.CreateUser() + rep := strings.NewReplacer( + "generatedA", fmt.Sprintf("generatedA%d", idCounter), + "generatedB", fmt.Sprintf("generatedB%d", idCounter), + ) + idCounter++ + + for i, oc := range user1OnCall { + oc.EPName = rep.Replace(oc.EPName) + oc.RotationName = rep.Replace(oc.RotationName) + oc.ScheduleName = rep.Replace(oc.ScheduleName) + oc.ServiceName = rep.Replace(oc.ServiceName) + user1OnCall[i] = oc + } + + for i, oc := range user2OnCall { + oc.EPName = rep.Replace(oc.EPName) + oc.RotationName = rep.Replace(oc.RotationName) + oc.ScheduleName = rep.Replace(oc.ScheduleName) + oc.ServiceName = rep.Replace(oc.ServiceName) + user2OnCall[i] = oc + } + + input = strings.Replace(input, "u1", u1.ID, -1) + input = strings.Replace(input, "u2", u2.ID, -1) + input = rep.Replace(input) + query := fmt.Sprintf(` + mutation { + createAll(input:{ + 
%s + }) { + services {id} + escalation_policies {id} + rotations {id} + user_overrides {id} + schedules {id} + } + } + `, input) + t.Run(name, func(t *testing.T) { + + var resp struct { + CreateAll map[string][]struct{ ID string } + } + doQL(t, false, query, &resp) + h.Trigger() + + var onCall struct { + User struct { + OnCallAssignments []resolver.OnCallAssignment `json:"on_call_assignments"` + } + } + + var hasFailure bool + + checkUser := func(name, uid string) { + + t.Run("User_"+name, func(t *testing.T) { + doQL(t, false, fmt.Sprintf(` + query { + user(id: "%s") { on_call_assignments{ + escalation_policy_name + escalation_policy_step_number + is_active + rotation_name + schedule_name + service_name + user_id + } } + + } + `, uid), &onCall) + + m := make(map[asnID]resolver.OnCallAssignment, len(onCall.User.OnCallAssignments)) + checked := make(map[asnID]bool) + for _, a := range onCall.User.OnCallAssignments { + m[getID(a)] = a + } + var asn []resolver.OnCallAssignment + switch name { + case "u1": + asn = user1OnCall + case "u2": + asn = user2OnCall + } + + for _, a := range asn { + id := getID(a) + checked[id] = true + resp, ok := m[id] + if !ok { + hasFailure = true + t.Errorf("got nil, want assignment %+v", id) + continue + } + + if resp.UserID != uid { + hasFailure = true + t.Errorf("Bad UserID for %+v: got %s; want %s", id, resp.UserID, uid) + } + + if resp.IsActive != a.IsActive { + hasFailure = true + t.Errorf("Wrong active state for %+v: got %t; want %t", id, resp.IsActive, a.IsActive) + } + } + for aID := range m { + if checked[aID] { + continue + } + hasFailure = true + t.Errorf("got unexpected assignment: %+v", aID) + } + }) + } + + checkUser("u1", u1.ID) + checkUser("u2", u2.ID) + + if hasFailure { + t.Fatal() + } + + }) + } + + // User directly on EP is always on call + check("User EP Direct", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", 
delay_minutes: 1, targets: [{target_type: user, target_id: "u1" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + `, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", Level: 0, IsActive: true}, + }, + nil, + ) + + // Active participant directly on EP is always on call + check("User EP Rotation Direct", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: rotation, target_id: "rot" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generatedA", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u1"}] + `, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", RotationName: "generatedA", Level: 0, IsActive: true}, + }, + nil, + ) + + // Active participant directly on EP is always on call, rotation directly on EP but no participant, user has no assignments + check("Only One User EP Rotation Direct", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: rotation, target_id: "rot" }, {target_type: rotation, target_id: "rot2" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "3006-01-02T15:04:05Z", name: "generatedA", description: "1"}, + {id_placeholder: "rot2", time_zone: "UTC", shift_length: 1, type: weekly, start: "2016-01-02T15:04:05Z", name: "generatedB", description: "2"} ] + rotation_participants: [{rotation_id: "rot2", 
user_id: "u2"}] + `, + nil, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", RotationName: "generatedB", Level: 0, IsActive: true}, + }, + ) + + // Different users on different rotations, users are on call but with different assignment rotations + check("Multiple Users EP Rotation Direct", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: rotation, target_id: "rot" }, {target_type: rotation, target_id: "rot2" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generatedA", description: "1"}, + {id_placeholder: "rot2", time_zone: "UTC", shift_length: 1, type: weekly, start: "2016-01-02T15:04:05Z", name: "generatedB", description: "2"} ] + rotation_participants: [{rotation_id: "rot", user_id: "u1"}, {rotation_id: "rot2", user_id: "u2"}] + `, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", RotationName: "generatedA", Level: 0, IsActive: true}, + }, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", RotationName: "generatedB", Level: 0, IsActive: true}, + }, + ) + + // EP -> Schedule, where there is an active ADD for a user + check("User EP Schedule Add Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + user_overrides: [{add_user_id: "u1", start_time: "1006-01-02T15:04:05Z", 
end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + }, + nil, + ) + + // EP -> Schedule, where there is an inactive ADD for a user + check("User EP Schedule Inactive Add Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + user_overrides: [{add_user_id: "u1", start_time: "3006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: false}, + }, + nil, + ) + + // Active schedule rule, user is replaced + check("User EP Schedule Replace Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:user, target_id:"u2"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + user_overrides: [{add_user_id: "u1", remove_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, + 
[]resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + }, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: false}, + }, + ) + + // Active schedule rule, user is replaced but in the future (inactive replacement) + check("User EP Schedule Inactive Replace Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:user, target_id:"u2"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + user_overrides: [{add_user_id: "u1", remove_user_id: "u2", start_time: "3006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: false}, + }, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + }, + ) + + // Same scenario, user is NOT replaced (no override) + check("User EP Schedule Replace Override Absent", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", 
time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:user, target_id:"u2"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + `, + []resolver.OnCallAssignment{}, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + }, + ) + + // Same scenario, user is NOT replaced (no override), inactive schedule rule + check("User EP Schedule No Days Replace Override Absent", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:user, target_id:"u2"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: false, monday:false, tuesday:false, wednesday: false, thursday: false, friday: false, saturday: false}] + `, + []resolver.OnCallAssignment{}, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: false}, + }, + ) + + // Active schedule rule, active rotation participant is replaced + check("User EP Schedule Replace Rotation Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: 
[{target:{target_type:rotation, target_id:"rot"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generatedA", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u2"}] + user_overrides: [{add_user_id: "u1", remove_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + }, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: false}, + }, + ) + + // Active schedule rule, active rotation participant is replaced + check("User EP Schedule Replace Rotation Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:rotation, target_id:"rot"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generatedA", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u2"}] + user_overrides: [{add_user_id: "u1", remove_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", 
target_type: schedule, target_id: "s"}] +`, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + }, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: false}, + }, + ) + + // Active schedule rule, active rotation participant is replaced with an inactive replace override + check("User EP Schedule Replace Rotation Override (Inactive)", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:rotation, target_id:"rot"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generatedA", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u2"}] + user_overrides: [{add_user_id: "u1", remove_user_id: "u2", start_time: "3006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: false}, + }, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + }, + ) + + // Same as above, but no service assignment + check("User EP Schedule Replace Rotation Override No Service", ` + escalation_policies: [{ id_placeholder: "ep", name: 
"generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:rotation, target_id:"rot"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generatedA", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u2"}] + user_overrides: [{add_user_id: "u1", remove_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, + []resolver.OnCallAssignment{}, + []resolver.OnCallAssignment{}, + ) + + // Same as above, but 2 service assignments + check("User EP Schedule Replace Rotation Override Double Service", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"},{description: "ok", name: "generatedB", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:rotation, target_id:"rot"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generatedA", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u2"}] + user_overrides: 
[{add_user_id: "u1", remove_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + {ServiceName: "generatedB", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + }, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: false}, + {ServiceName: "generatedB", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: false}, + }, + ) + + // Active schedule rule, active rotation participant is NOT replaced (no override) + check("User EP Schedule Replace Rotation Override Absent", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:rotation, target_id:"rot"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generatedA", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u2"}] + `, + []resolver.OnCallAssignment{}, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + }, + ) + + // Active schedule rule, active rotation participant is removed + check("User EP Schedule Remove Rotation Override", ` + escalation_policies: 
[{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:rotation, target_id:"rot"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + rotations: [{id_placeholder: "rot", time_zone: "UTC", shift_length: 1, type: weekly, start: "2006-01-02T15:04:05Z", name: "generatedA", description: "1"}] + rotation_participants: [{rotation_id: "rot", user_id: "u2"}] + user_overrides: [{ remove_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, + []resolver.OnCallAssignment{}, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: false}, + }, + ) + + // Active schedule rule, user is removed + check("User EP Schedule Remove Override", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:user, target_id:"u2"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + user_overrides: [{remove_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: 
"4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, + []resolver.OnCallAssignment{}, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: false}, + }, + ) + + // Multiple add overrides, active schedule rules + check("User EP Schedule Multiple Overrides", ` + escalation_policies: [{ id_placeholder: "ep", name: "generatedA", description: "1"}] + escalation_policy_steps: [{escalation_policy_id: "ep", delay_minutes: 1, targets: [{target_type: schedule, target_id: "s" }] }] + services: [{id_placeholder: "svc", description: "ok", name: "generatedA", escalation_policy_id: "ep"}] + schedules: [{id_placeholder: "s", time_zone: "UTC", name: "generatedA", description: "1"}] + schedule_rules: [{target:{target_type:user, target_id:"u2"}, start:"00:00", end:"23:59", schedule_id: "s", sunday: true, monday:true, tuesday:true, wednesday: true, thursday: true, friday: true, saturday: true}] + user_overrides: [{add_user_id: "u1", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}, + {add_user_id: "u2", start_time: "1006-01-02T15:04:05Z", end_time: "4006-01-02T15:04:05Z", target_type: schedule, target_id: "s"}] + `, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + }, + []resolver.OnCallAssignment{ + {ServiceName: "generatedA", EPName: "generatedA", ScheduleName: "generatedA", Level: 0, IsActive: true}, + }, + ) + +} diff --git a/smoketest/graphqlservicelabels_test.go b/smoketest/graphqlservicelabels_test.go new file mode 100644 index 0000000000..2ba2e0854b --- /dev/null +++ b/smoketest/graphqlservicelabels_test.go @@ -0,0 +1,58 @@ +package smoketest + +import ( + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestGraphQLServiceLabels tests that labels for services can be created +// (currently only be created 
directly through db and not via GraphQL layer), +// edited and deleted. + +func TestGraphQLServiceLabels(t *testing.T) { + t.Parallel() + + // Insert initial one label into db + const sql = ` + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into labels (id, tgt_service_id, key, value) + values + ('1', {{uuid "sid"}}, 'foo/bar', 'testvalue'); +` + + h := harness.NewHarness(t, sql, "labels-switchover-trigger") + defer h.Close() + + doQL := func(query string) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + t.Log("Response:", string(g.Data)) + } + + // Edit label + doQL(fmt.Sprintf(` + mutation { + setLabel(input:{ target_type: service ,target_id: "%s", key: "%s", value: "%s" }) + } + `, h.UUID("sid"), "foo/bar", "editedvalue")) + + // Delete label + doQL(fmt.Sprintf(` + mutation { + setLabel(input:{ target_type: service ,target_id: "%s", key: "%s", value: "%s" }) + } + `, h.UUID("sid"), "foo/bar", "")) + +} diff --git a/smoketest/graphqlupdaterotation_test.go b/smoketest/graphqlupdaterotation_test.go new file mode 100644 index 0000000000..ab9b130801 --- /dev/null +++ b/smoketest/graphqlupdaterotation_test.go @@ -0,0 +1,123 @@ +package smoketest + +import ( + "encoding/json" + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestGraphQLUpdateRotation tests that all steps like creating and updating rotations are carried out without any errors. 
+func TestGraphQLUpdateRotation(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "u1"}}, 'bob', 'joe'), + ({{uuid "u2"}}, 'ben', 'josh'); +` + + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + doQL := func(query string, res interface{}) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + t.Log("Response:", string(g.Data)) + if res == nil { + return + } + err := json.Unmarshal(g.Data, &res) + if err != nil { + t.Fatal("failed to parse response:", err) + } + } + + var sched struct { + CreateSchedule struct { + ID string + Rotations []struct{ ID string } + } + } + doQL(fmt.Sprintf(` + mutation { + createSchedule(input:{ + name: "default", + description: "default testing", + time_zone: "America/Chicago", + default_rotation: { + type: daily, + start_time: "%s", + shift_length:1, + } + }){ + id + rotations { + id + } + } + } + + `, time.Now().Format(time.RFC3339)), &sched) + + sID := sched.CreateSchedule.ID + var rotation struct { + CreateOrUpdateRotation struct { + Rotation struct { + ID string + Name string + } + } + } + doQL(fmt.Sprintf(` + mutation { + createOrUpdateRotation(input:{ + id: "%s", + name: "default", + start: "2017-08-15T19:00:00Z", + type: daily, + shift_length: 2, + schedule_id: "%s" + }){ + rotation { + id + name + } + } + } + + `, sched.CreateSchedule.Rotations[0].ID, sID), &rotation) + + var newSched struct { + Schedule struct { + Rotations []struct { + ShiftLength int `json:"shift_length"` + } + } + } + doQL(fmt.Sprintf(` + query { + schedule(id: "%s") { + rotations { + id + shift_length + } + } + } + + `, sID), &newSched) + + if len(newSched.Schedule.Rotations) != 1 { + t.Errorf("got %d rotations; want 1", len(newSched.Schedule.Rotations)) + } + if newSched.Schedule.Rotations[0].ShiftLength != 2 { + t.Errorf("got shift_length of %d; want 2", 
newSched.Schedule.Rotations[0].ShiftLength) + } +} diff --git a/smoketest/graphqluserfavorites_test.go b/smoketest/graphqluserfavorites_test.go new file mode 100644 index 0000000000..cb2cc5218b --- /dev/null +++ b/smoketest/graphqluserfavorites_test.go @@ -0,0 +1,122 @@ +package smoketest + +import ( + "encoding/json" + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestGraphQLUserFavorites tests that services can be set and unset as user favorites +func TestGraphQLUserFavorites(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid1"}}, {{uuid "eid"}}, 'service1'), + ({{uuid "sid2"}}, {{uuid "eid"}}, 'service2'); +` + + h := harness.NewHarness(t, sql, "UserFavorites") + defer h.Close() + + doQL := func(t *testing.T, query string, res interface{}) { + g := h.GraphQLQueryT(t, query, "/v1/graphql") + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + t.Log("Response:", string(g.Data)) + + if res == nil { + return + } + err := json.Unmarshal(g.Data, &res) + if err != nil { + t.Fatal(err) + } + } + + doQL(t, fmt.Sprintf(` + mutation { + setUserFavorite (input: { + target_type: service ,target_id: "%s" + }) { + target_id + } + } + `, h.UUID("sid1")), nil) + + var s struct { + Service struct { + IsUserFav bool `json:"is_user_favorite"` + } + } + + doQL(t, fmt.Sprintf(` + query { + service(id: "%s") { + is_user_favorite + } + } + `, h.UUID("sid1")), &s) + + if s.Service.IsUserFav != true { + t.Fatalf("ERROR: ServiceID %s IsUserFavorite=%t; want true", h.UUID("sid1"), s.Service.IsUserFav) + } + + doQL(t, fmt.Sprintf(` + query { + service(id: "%s") { + is_user_favorite + } + } + `, h.UUID("sid2")), &s) + + if 
s.Service.IsUserFav != false { + t.Fatalf("ERROR: ServiceID %s IsUserFavorite=%t; want false", h.UUID("sid2"), s.Service.IsUserFav) + } + + // Again Setting as user-favorite should result in no change + doQL(t, fmt.Sprintf(` + mutation { + setUserFavorite (input: { + target_type: service ,target_id: "%s" + }) { + target_id + } + } + `, h.UUID("sid2")), nil) + + doQL(t, fmt.Sprintf(` + mutation { + unsetUserFavorite (input: { + target_type: service ,target_id: "%s" + }) { + target_id + } + } + `, h.UUID("sid2")), nil) + + doQL(t, fmt.Sprintf(` + query { + service(id: "%s") { + is_user_favorite + } + } + `, h.UUID("sid2")), &s) + + if s.Service.IsUserFav != false { + t.Fatalf("ERROR: ServiceID %s IsUserFavorite=%t; want false", h.UUID("sid2"), s.Service.IsUserFav) + } + +} diff --git a/smoketest/graphqlusers_test.go b/smoketest/graphqlusers_test.go new file mode 100644 index 0000000000..4a24cd0aa6 --- /dev/null +++ b/smoketest/graphqlusers_test.go @@ -0,0 +1,58 @@ +package smoketest + +import ( + "encoding/json" + "github.com/target/goalert/smoketest/harness" + "github.com/target/goalert/user" + "testing" +) + +// TestGraphQLUsers tests that listing users works properly. 
// TestGraphQLUsers tests that listing users works properly.
func TestGraphQLUsers(t *testing.T) {
	t.Parallel()

	// Seed two regular users; the harness implicitly adds a third
	// admin ("GraphQL User") when the first query is made.
	const sql = `
	insert into users (id, name, email, role)
	values
		({{uuid "u1"}}, 'bob', 'joe', 'user'),
		({{uuid "u2"}}, 'ben', 'josh', 'user');
`

	h := harness.NewHarness(t, sql, "ids-to-uuids")
	defer h.Close()

	// doQL executes a GraphQL query and unmarshals the response into res
	// (skipped when res is nil). Any GraphQL error fails the test.
	doQL := func(query string, res interface{}) {
		g := h.GraphQLQuery(query)
		for _, err := range g.Errors {
			t.Error("GraphQL Error:", err.Message)
		}
		if len(g.Errors) > 0 {
			t.Fatal("errors returned from GraphQL")
		}
		t.Log("Response:", string(g.Data))
		if res == nil {
			return
		}
		err := json.Unmarshal(g.Data, &res)
		if err != nil {
			t.Fatal("failed to parse response:", err)
		}
	}

	var res struct {
		Users []user.User
	}
	doQL(`
		query {
			users {
				id
				name
				role
			}
		}
	`, &res)
	if len(res.Users) != 3 {
		// 3 because the 'GraphQL User' will be implicitly added.
		t.Errorf("got %d users; want 3", len(res.Users))
	}
}
h.t.Logf("Backend: %s %s", strings.ToUpper(entry.Level), entry.Message) + } + if !sent && entry.URL != "" { + sent = true + urlCh <- entry.URL + } + } + if h.isClosing() { + return + } + data := make([]byte, 32768) + n, _ := dec.Buffered().Read(data) + nx, _ := r.Read(data[n:]) + if n+nx > 0 { + h.t.Logf("Buffered: %s", string(data[:n+nx])) + } + + h.t.Errorf("failed to read/parse JSON logs: %v", err) +} +func (h *Harness) watchBackend(c io.Closer) { + defer c.Close() + err := h.cmd.Wait() + if err != nil && !h.isClosing() { + h.t.Errorf("backend failed: %v", err) + } +} diff --git a/smoketest/harness/datagen.go b/smoketest/harness/datagen.go new file mode 100644 index 0000000000..c4e8bd2b2f --- /dev/null +++ b/smoketest/harness/datagen.go @@ -0,0 +1,110 @@ +package harness + +import ( + "fmt" + "math/rand" + "strconv" + "strings" + "sync" + "testing" + + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" + "github.com/ttacon/libphonenumber" +) + +// DataGen handles generating random data for tests. It ties arbitrary ids to +// generated values so they can be re-used during a test. +type DataGen struct { + data map[dataGenKey]string + uniq map[dataGenKey]bool + mx sync.Mutex + g Generator + t *testing.T + name string +} + +type DataGenFunc func() string + +type DataGenArgFunc func(string) string + +type Generator interface { + Generate(string) string +} + +func (d DataGenFunc) Generate(string) string { + return d() +} + +func (d DataGenArgFunc) Generate(a string) string { + return d(a) +} + +// NewDataGen will create a new data generator. fn should return a new/unique string each time +func NewDataGen(t *testing.T, name string, g Generator) *DataGen { + return &DataGen{ + data: make(map[dataGenKey]string), + uniq: make(map[dataGenKey]bool), + g: g, + t: t, + name: name, + } +} + +type dataGenKey struct{ arg, id string } + +// Get returns the value associated with id. The first time an id is provided, +// a new value is generated. 
If id is empty, a new value will always be returned. +func (d *DataGen) Get(id string) string { + return d.GetWithArg("", id) +} + +// GetWithArg returns the value associated with id. The first time an id is provided, +// a new value is generated. If id is empty, a new value will always be returned. +func (d *DataGen) GetWithArg(arg, id string) string { + d.mx.Lock() + defer d.mx.Unlock() + if id == "" { + return d.g.Generate(arg) + } + key := dataGenKey{arg: arg, id: id} + val := dataGenKey{arg: arg, id: ""} + var ok bool + val.id, ok = d.data[key] + if !ok { + val.id = d.g.Generate(arg) + for d.uniq[val] { + val.id = d.g.Generate(arg) + } + d.uniq[val] = true + d.t.Logf(`%s("%s") = "%s"`, d.name, id, val.id) + d.data[key] = val.id + } + + return val.id +} + +// GenUUID will return a random UUID. +func GenUUID() string { + return uuid.NewV4().String() +} + +// GenPhone will return a random phone number +func GenPhone() string { + return GenPhoneCC("+1") +} + +// GenPhoneCC will return a random phone number with supplied country code +func GenPhoneCC(cc string) string { + ccInt, err := strconv.Atoi(strings.TrimPrefix(cc, "+")) + if err != nil { + panic(errors.Wrapf(err, "parse country code '%s'", cc)) + } + region := libphonenumber.GetRegionCodeForCountryCode(ccInt) + if region == "" || region == "ZZ" { + panic(fmt.Sprintf("invalid cc '%s'", cc)) + } + num := libphonenumber.GetExampleNumber(region) + *num.NationalNumber = *num.NationalNumber + uint64(rand.Intn(9999)) + return libphonenumber.Format(num, libphonenumber.E164) +} diff --git a/smoketest/harness/graphql.go b/smoketest/harness/graphql.go new file mode 100644 index 0000000000..0d21a71c0b --- /dev/null +++ b/smoketest/harness/graphql.go @@ -0,0 +1,95 @@ +package harness + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/target/goalert/auth" + "github.com/target/goalert/permission" + "github.com/target/goalert/user" 
// insertGraphQLUser creates the implicit admin user (and a browser
// session for it) that all harness GraphQL queries are issued as.
func (h *Harness) insertGraphQLUser() {
	h.t.Helper()
	var err error
	permission.SudoContext(context.Background(), func(ctx context.Context) {
		// Fixed UUID so tests can account for (and exclude) this
		// implicit user deterministically.
		_, err = h.usr.Insert(ctx, &user.User{
			Name: "GraphQL User",
			ID:   "bcefacc0-4764-012d-7bfb-002500d5decb",
			Role: permission.RoleAdmin,
		})
	})
	if err != nil {
		h.t.Fatal(errors.Wrap(err, "create GraphQL user"))
	}

	h.sessToken, _, err = h.authH.CreateSession(context.Background(), "goalert-smoketest", "bcefacc0-4764-012d-7bfb-002500d5decb")
	if err != nil {
		h.t.Fatal(errors.Wrap(err, "create auth session"))
	}
}
+func (h *Harness) GraphQLQueryT(t *testing.T, query string, u string) *QLResponse { + t.Helper() + h.addGraphUser.Do(h.insertGraphQLUser) + query = strings.Replace(query, "\t", "", -1) + q := struct{ Query string }{Query: query} + + data, err := json.Marshal(q) + if err != nil { + h.t.Fatal("failed to marshal graphql query") + } + t.Log("Query:", query) + + url := h.URL() + u + req, err := http.NewRequest("POST", url, bytes.NewBuffer(data)) + if err != nil { + t.Fatal("failed to make request:", err) + } + req.AddCookie(&http.Cookie{ + Name: auth.CookieName, + Value: h.sessToken, + }) + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatal("failed to make http request:", err) + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + t.Fatal("failed to make graphql request:", resp.Status) + } + + var r QLResponse + err = json.NewDecoder(resp.Body).Decode(&r) + if err != nil { + t.Fatal("failed to parse GraphQL response:", err) + } + return &r +} + +// QLResponse is a generic GraphQL response. 
+type QLResponse struct { + Data json.RawMessage + Errors []struct{ Message string } +} diff --git a/smoketest/harness/harness.go b/smoketest/harness/harness.go new file mode 100644 index 0000000000..72e24e0459 --- /dev/null +++ b/smoketest/harness/harness.go @@ -0,0 +1,645 @@ +package harness + +import ( + "bytes" + "context" + "database/sql" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "os" + "os/exec" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "testing" + "text/template" + "time" + + "github.com/lib/pq" + _ "github.com/lib/pq" // load postgres driver + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" + "github.com/target/goalert/alert" + alertlog "github.com/target/goalert/alert/log" + "github.com/target/goalert/auth" + "github.com/target/goalert/config" + "github.com/target/goalert/devtools/mockslack" + "github.com/target/goalert/devtools/mocktwilio" + "github.com/target/goalert/keyring" + "github.com/target/goalert/permission" + "github.com/target/goalert/user" + "github.com/target/goalert/user/notificationrule" +) + +const dbTimeFormat = "2006-01-02 15:04:05.999999-07:00" + +// DBURL will return the URL for the given dbName. +// +// If DB_URL is set, it will be used. +func DBURL(dbName string) string { + env := os.Getenv("DB_URL") + if env != "" && dbName == "" { + return env + } + if env == "" { + env = "postgres://goalert@127.0.0.1:5432?sslmode=disable" + } + u, err := url.Parse(env) + if err != nil { + panic(err) + } + u.Path = dbName + + return u.String() +} + +// Harness is a helper for smoketests. It deals with assertions, database management, and backend monitoring during tests. 
// Harness is a helper for smoketests. It deals with assertions, database management, and backend monitoring during tests.
type Harness struct {
	phoneG, phoneCCG, uuidG *DataGen // per-test deterministic data generators
	t                       *testing.T
	closing                 bool // set (under mx) once Close begins; silences expected shutdown errors

	tw  *twServer        // mock Twilio assertion server
	twS *httptest.Server // HTTP server wrapping tw

	slack     *slackServer
	slackS    *httptest.Server
	slackApp  mockslack.AppInfo
	slackUser mockslack.UserInfo

	ignoreErrors []string // backend-log substrings that should not fail the test

	cmd *exec.Cmd // the running goalert backend process

	backendURL  string // base URL the backend reports once started
	dbURL       string
	dbName      string
	delayOffset time.Duration // accumulated fake-time offset applied to the DB clock
	mx          sync.Mutex    // guards closing and ignoreErrors

	start          time.Time // wall-clock time the harness was created
	resumed        time.Time // wall-clock time the DB clock was un-frozen
	lastTimeChange time.Time // last wall-clock time the DB offset was adjusted
	pgResume       time.Time // postgres now() at the moment time resumed

	db *sql.DB
	nr notificationrule.Store
	a  alert.Store

	usr                user.Store
	userGeneratedIndex int       // counter for CreateUser-generated names/emails
	addGraphUser       sync.Once // ensures the implicit GraphQL admin user is created once

	sessToken string
	sessKey   keyring.Keyring
	authH     *auth.Handler
}
// NewStoppedHarness will create a NewHarness, but will not call Start.
func NewStoppedHarness(t *testing.T, initSQL, migrationName string) *Harness {
	t.Helper()
	if testing.Short() {
		t.Skip("skipping Harness tests for short mode")
	}

	t.Logf("Using DB URL: %s", DBURL(""))
	start := time.Now()
	// Unique, identifier-safe database name per test run (dashes stripped
	// from the UUID so quoting stays simple).
	name := strings.Replace("smoketest_"+time.Now().Format("2006_01_02_15_04_05")+uuid.NewV4().String(), "-", "", -1)

	runCmd(t, exec.Command("psql", "-d", DBURL(""), "-c", "create database "+pq.QuoteIdentifier(name)))
	t.Logf("created test database '%s': %s", name, DBURL(name))
	g := NewDataGen(t, "Phone", DataGenFunc(GenPhone))

	twCfg := mocktwilio.Config{
		AuthToken:    twilioAuthToken,
		AccountSID:   twilioAccountSID,
		MinQueueTime: time.Second, // until we have a stateless backend for answering calls
	}

	h := &Harness{
		phoneG:         g,
		uuidG:          NewDataGen(t, "UUID", DataGenFunc(GenUUID)),
		phoneCCG:       NewDataGen(t, "PhoneCC", DataGenArgFunc(GenPhoneCC)),
		dbURL:          DBURL(name),
		dbName:         name,
		lastTimeChange: start,
		start:          start,

		tw: newTWServer(t, mocktwilio.NewServer(twCfg)),

		t: t,
	}

	h.twS = httptest.NewServer(h.tw)

	// Backend command is prepared here but only launched by Start.
	h.cmd = exec.Command(
		"goalert",
		"-l", "localhost:0",
		"-v",
		"--db-url", DBURL(name),
		"--json",
		"--twilio-base-url", h.twS.URL,
		"--db-max-open", "5", // 2 for API 1 for engine
	)
	h.cmd.Env = os.Environ()

	// freeze DB time until backend starts: now() is overridden via a
	// schema placed ahead of pg_catalog in the search path.
	h.execQuery(`
		create schema testing_overrides;
		alter database ` + pq.QuoteIdentifier(name) + ` set search_path = "$user", public,testing_overrides, pg_catalog;


		create or replace function testing_overrides.now()
		returns timestamp with time zone
		as $$
			begin
			return '` + start.Format(dbTimeFormat) + `';
			end;
		$$ language plpgsql;
	`)

	h.Migrate(migrationName)
	h.initSlack()
	h.execQuery(initSQL)

	return h
}
= true + cfg.Slack.AccessToken = h.slackApp.AccessToken + cfg.Slack.ClientID = h.slackApp.ClientID + cfg.Slack.ClientSecret = h.slackApp.ClientSecret + cfg.Twilio.Enable = true + cfg.Twilio.AccountSID = twilioAccountSID + cfg.Twilio.AuthToken = twilioAuthToken + cfg.Twilio.FromNumber = h.phoneG.Get("twilio") + data, err := json.Marshal(cfg) + if err != nil { + h.t.Fatalf("failed to marshal config: %v", err) + } + + cfgCmd := exec.Command("goalert", "migrate", "--db-url="+DBURL(h.dbName)) + cfgCmd.Stdin = bytes.NewReader(data) + out, err := cfgCmd.CombinedOutput() + if err != nil { + h.t.Fatalf("failed to migrate backend: %v\n%s", err, string(out)) + } + cfgCmd = exec.Command("goalert", "set-config", "--allow-empty-data-encryption-key", "--db-url="+DBURL(h.dbName)) + cfgCmd.Stdin = bytes.NewReader(data) + out, err = cfgCmd.CombinedOutput() + if err != nil { + h.t.Fatalf("failed to config backend: %v\n%s", err, string(out)) + } + + // resume the flow of time + h.db, err = sql.Open("postgres", h.dbURL) + if err != nil { + h.t.Fatalf("failed to open DB: %v", err) + } + err = h.db.QueryRow(`select pg_catalog.now()`).Scan(&h.pgResume) + if err != nil { + h.t.Fatalf("failed to get postgres timestamp: %v", err) + } + h.resumed = time.Now() + h.lastTimeChange = time.Now().Add(100 * time.Millisecond) + h.modifyDBOffset(0) + + h.cmd.Env = append(h.cmd.Env, "GOALERT_SLACK_BASE_URL="+h.slackS.URL) + err = h.cmd.Start() + if err != nil { + h.t.Fatalf("failed to start backend: %v", err) + } + urlCh := make(chan string) + go h.watchBackend(r) + go h.watchBackendLogs(r, urlCh) + + h.backendURL = <-urlCh + + err = h.tw.RegisterSMSCallback(h.phoneG.Get("twilio"), h.backendURL+"/v1/twilio/sms/messages") + if err != nil { + h.t.Fatalf("failed to init twilio (SMS callback): %v", err) + } + err = h.tw.RegisterVoiceCallback(h.phoneG.Get("twilio"), h.backendURL+"/v1/twilio/voice/call") + if err != nil { + h.t.Fatalf("failed to init twilio (voice callback): %v", err) + } + ctx := 
context.Background() + + h.nr, err = notificationrule.NewDB(ctx, h.db) + if err != nil { + h.t.Fatalf("failed to init notification rule backend: %v", err) + } + h.usr, err = user.NewDB(ctx, h.db) + if err != nil { + h.t.Fatalf("failed to init user backend: %v", err) + } + + aLog, err := alertlog.NewDB(ctx, h.db) + if err != nil { + h.t.Fatalf("failed to init alert log backend: %v", err) + } + + h.a, err = alert.NewDB(ctx, h.db, aLog) + if err != nil { + h.t.Fatalf("failed to init alert backend: %v", err) + } + + h.sessKey, err = keyring.NewDB(ctx, h.db, &keyring.Config{ + Name: "browser-sessions", + }) + if err != nil { + h.t.Fatalf("failed to init keyring: %v", err) + } + + h.authH, err = auth.NewHandler(ctx, h.db, auth.HandlerConfig{ + SessionKeyring: h.sessKey, + }) + if err != nil { + h.t.Fatalf("failed to init auth handler: %v", err) + } +} + +// URL returns the backend server's URL +func (h *Harness) URL() string { + return h.backendURL +} + +// Migrate will perform `steps` number of migrations. +func (h *Harness) Migrate(migrationName string) { + h.t.Helper() + h.t.Logf("Running migrations (target: %s)", migrationName) + data, err := exec.Command("goalert", "migrate", "--db-url", h.dbURL, "--up", migrationName).CombinedOutput() + if err != nil { + h.t.Log(string(data)) + h.t.Fatalf("failed to run migration: %v", err) + } +} + +// IgnoreErrorsWith will cause the Harness to ignore backend errors containing the specified substring. 
// modifyDBOffset adjusts the DB clock offset by d, first discounting any
// real time that has already elapsed since the last adjustment so that
// wall-clock progress is not double-counted in the offset.
func (h *Harness) modifyDBOffset(d time.Duration) {
	n := time.Now()
	d -= n.Sub(h.lastTimeChange)
	if n.After(h.lastTimeChange) {
		h.lastTimeChange = n
	}

	h.delayOffset += d

	h.setDBOffset(h.delayOffset)
}
+func (h *Harness) Delay(d time.Duration) { + h.t.Helper() + h.t.Logf("Wait %s", d.String()) + time.Sleep(d) + h.trigger() +} + +func (h *Harness) FastForward(d time.Duration) { + h.t.Helper() + h.t.Logf("Fast-forward %s", d.String()) + h.delayOffset += d + h.setDBOffset(h.delayOffset) +} + +func (h *Harness) execQuery(sql string) { + h.t.Helper() + t := template.New("sql") + t.Funcs(template.FuncMap{ + "uuid": func(id string) string { return fmt.Sprintf("'%s'", h.uuidG.Get(id)) }, + "phone": func(id string) string { return fmt.Sprintf("'%s'", h.phoneG.Get(id)) }, + "phoneCC": func(cc, id string) string { return fmt.Sprintf("'%s'", h.phoneCCG.GetWithArg(cc, id)) }, + + "slackChannelID": func(name string) string { return fmt.Sprintf("'%s'", h.Slack().Channel(name).ID()) }, + }) + _, err := t.Parse(sql) + if err != nil { + h.t.Fatalf("failed to parse query template: %v", err) + } + + b := new(bytes.Buffer) + err = t.Execute(b, nil) + if err != nil { + h.t.Fatalf("failed to render query template: %v", err) + } + + runCmd(h.t, exec.Command("psql", "-d", h.dbURL, "-c", b.String())) +} + +// CreateAlert will create a new unacknowledged alert. +func (h *Harness) CreateAlert(serviceID, summary string) { + h.t.Helper() + a := &alert.Alert{ + ServiceID: serviceID, + Summary: summary, + } + h.t.Logf("insert alert: %v", a) + permission.SudoContext(context.Background(), func(ctx context.Context) { + h.t.Helper() + _, err := h.a.Create(ctx, a) + if err != nil { + h.t.Fatalf("failed to insert alet: %v", err) + } + }) + h.trigger() +} + +// AddNotificationRule will add a notification rule to the database. 
+func (h *Harness) AddNotificationRule(userID, cmID string, delayMinutes int) { + h.t.Helper() + nr := ¬ificationrule.NotificationRule{ + DelayMinutes: delayMinutes, + UserID: userID, + ContactMethodID: cmID, + } + h.t.Logf("insert notification rule: %v", nr) + permission.SudoContext(context.Background(), func(ctx context.Context) { + h.t.Helper() + _, err := h.nr.Insert(ctx, nr) + if err != nil { + h.t.Fatalf("failed to insert notification rule: %v", err) + } + }) + h.trigger() +} + +// Trigger will trigger, and wait for, an engine cycle. +func (h *Harness) Trigger() { + go h.trigger() + + // wait for the next cycle to start and end before returning + http.Get(h.backendURL + "/health/engine") +} + +// Escalate will escalate an alert in the database, when 'level' matches. +func (h *Harness) Escalate(alertID, level int) { + h.t.Helper() + h.t.Logf("escalate alert #%d (from level %d)", alertID, level) + permission.SudoContext(context.Background(), func(ctx context.Context) { + err := h.a.Escalate(ctx, alertID, level) + if err != nil { + h.t.Fatalf("failed to escalate alert: %v", err) + } + }) + h.trigger() +} + +// Phone will return the generated phone number for the id provided. +func (h *Harness) Phone(id string) string { return h.phoneG.Get(id) } + +// PhoneCC will return the generated phone number for the id provided. +func (h *Harness) PhoneCC(cc, id string) string { return h.phoneCCG.GetWithArg(cc, id) } + +// UUID will return the generated UUID for the id provided. 
+func (h *Harness) UUID(id string) string { return h.uuidG.Get(id) } + +func (h *Harness) isClosing() bool { + h.mx.Lock() + defer h.mx.Unlock() + return h.closing +} + +func (h *Harness) dumpDB() { + testName := reflect.ValueOf(h.t).Elem().FieldByName("name").String() + file := filepath.Join("smoketest_db_dump", testName+".sql") + os.MkdirAll(filepath.Dir(file), 0755) + var t time.Time + err := h.db.QueryRow("select now()").Scan(&t) + if err != nil { + h.t.Fatalf("failed to get current timestamp: %v", err) + } + err = exec.Command( + "pg_dump", + "-O", "-x", "-a", + "-f", file, + h.dbURL, + ).Run() + if err != nil { + h.t.Errorf("failed to dump database '%s': %v", h.dbName, err) + } + fd, err := os.OpenFile(file, os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + h.t.Fatalf("failed to open DB dump: %v", err) + } + defer fd.Close() + _, err = fmt.Fprintf(fd, "\n-- Last Timestamp: %s\n", t.Format(time.RFC3339Nano)) + if err != nil { + h.t.Fatalf("failed to open DB dump: %v", err) + } +} + +// Close terminates any background processes, and drops the testing database. +// It should be called at the end of all tests (usually with `defer h.Close()`). +func (h *Harness) Close() error { + h.t.Helper() + h.tw.WaitAndAssert() + h.slack.WaitAndAssert() + h.slackS.Close() + h.twS.Close() + h.mx.Lock() + h.closing = true + h.mx.Unlock() + if h.cmd.Process != nil { + h.cmd.Process.Kill() + } + + h.dumpDB() + h.db.Close() + + err := exec.Command("psql", "-d", DBURL(""), "-c", "drop database "+h.dbName).Run() + if err != nil { + h.t.Errorf("failed to drop database '%s': %v", h.dbName, err) + } + + return nil +} + +// CreateUser generates a random user. 
// WaitAndAssertOnCallUsers will ensure the correct set of users as on-call for the given serviceID.
func (h *Harness) WaitAndAssertOnCallUsers(serviceID string, userIDs ...string) {
	h.t.Helper()
	// doQL executes a GraphQL query and unmarshals the response into res
	// (skipped when res is nil). Any GraphQL error fails the test.
	doQL := func(query string, res interface{}) {
		g := h.GraphQLQuery(query)
		for _, err := range g.Errors {
			h.t.Error("GraphQL Error:", err.Message)
		}
		if len(g.Errors) > 0 {
			h.t.Fatal("errors returned from GraphQL")
		}
		if res == nil {
			return
		}
		err := json.Unmarshal(g.Data, &res)
		if err != nil {
			h.t.Fatal("failed to parse response:", err)
		}
	}

	// getUsers fetches the current on-call user IDs for the service,
	// sorted and de-duplicated (a user may be on-call for multiple steps).
	getUsers := func() []string {
		var result struct {
			Service struct {
				OnCall []struct {
					UserID   string `json:"user_id"`
					UserName string `json:"user_name"`
				} `json:"on_call_users"`
			}
		}

		doQL(fmt.Sprintf(`
		query {
			service(id: "%s") {
				on_call_users{
					user_id
					user_name
				}
			}
		}
	`, serviceID), &result)

		var ids []string
		for _, oc := range result.Service.OnCall {
			ids = append(ids, oc.UserID)
		}
		if len(ids) == 0 {
			return nil
		}
		sort.Strings(ids)
		// De-dupe the sorted list in place.
		uniq := ids[:1]
		last := ids[0]
		for _, id := range ids[1:] {
			if id == last {
				continue
			}
			uniq = append(uniq, id)
			last = id
		}
		return uniq
	}
	sort.Strings(userIDs)
	// match compares the current on-call set to the expectation; when
	// final is true, a mismatch fails the test instead of returning false.
	match := func(final bool) bool {
		ids := getUsers()
		if len(ids) != len(userIDs) {
			if final {
				h.t.Fatalf("got %d on-call users; want %d", len(ids), len(userIDs))
			}
			return false
		}
		for i, id := range userIDs {
			if ids[i] != id {
				if final {
					h.t.Fatalf("on-call[%d] = %s; want %s", i, ids[i], id)
				}
				return false
			}
		}
		return true
	}
	// Poll every 100ms until the expectation holds; after 10s do one
	// final, fatal comparison.
	timeout := time.NewTimer(10 * time.Second)
	defer timeout.Stop()

	check := time.NewTicker(100 * time.Millisecond)
	defer check.Stop()

	for !match(false) {
		select {
		case <-check.C: // pulling from check

		case <-timeout.C:
			match(true)
			return
		}
	}
}
// waitAndAssert checks each expected keyword set against the channel's
// current messages, consuming a message once it matches. It returns false
// if an expectation is not yet satisfied, and fails the test if timeout
// fires before all expectations are met.
func (ch *slackChannel) waitAndAssert(timeout <-chan time.Time) bool {
	msgs := ch.h.slack.Messages(ch.id)

	// check reports whether some remaining message contains ALL keywords.
	// A matched message is removed from msgs so it cannot satisfy two
	// expectations in the same pass.
	check := func(keywords []string) bool {
	msgLoop:
		for i, msg := range msgs {
			for _, w := range keywords {
				if !strings.Contains(msg.Text, w) {
					continue msgLoop
				}
			}
			msgs = append(msgs[:i], msgs[i+1:]...)
			return true
		}
		return false
	}

	for i, exp := range ch.expected {
		// Non-blocking timeout check between expectations.
		select {
		case <-timeout:
			ch.h.t.Fatalf("timeout waiting for slack message: channel=%s; ID=%s; message=%d keywords=%v\nGot: %s", ch.name, ch.id, i, exp, msgs)
		default:
		}
		if !check(exp) {
			return false
		}
	}

	return true
}
+ // + // It is safe to call multiple times for the same device. + Device(number string) TwilioDevice + + // WaitAndAssert will wait for all messages to be processed. + // + // It must be called before any calls to Body() will returned. + // + // Any unexpected messages (or missing ones) will result in a test failure. + WaitAndAssert() +} + +// A TwilioDevice immitates a device (i.e. a phone) for testing interactions. +type TwilioDevice interface { + + // ExpectSMS will match against an SMS that matches ALL provided keywords (case-insensitive). + // Each call to ExpectSMS results in the requirement that an additional SMS is received. + ExpectSMS(keywords ...string) TwilioExpectedMessage + + // ExpectVoice will match against a voice call where the spoken text matches ALL provided keywords (case-insensitive). + ExpectVoice(keywords ...string) TwilioExpectedCall + + // IgnoreUnexpectedSMS will cause any extra SMS messages (after processing ExpectSMS calls) that match + // ALL keywords (case-insensitive) to not fail the test. + IgnoreUnexpectedSMS(keywords ...string) + + // IgnoreUnexpectedVoice will cause any extra voice calls (after processing ExpectVoice) that match + // ALL keywords (case-insensitive) to not fail the test. + IgnoreUnexpectedVoice(keywords ...string) +} + +// TwilioExpectedCall represents a phone call. +type TwilioExpectedCall interface { + // ThenPress imitates a user entering a key on the phone. + ThenPress(digits string) TwilioExpectedCall + // ThenExpect asserts that the message matches ALL keywords (case-insensitive). + // + // Generally used as ThenPress().ThenExpect() + ThenExpect(keywords ...string) TwilioExpectedCall + + // RespondWithFailed will tell the backend that the call failed. + RespondWithFailed() + + // Body will return the full spoken message as text. Separate stanzas (e.g. multiple ``) are + // separated by newline. + // + // WaitAndAssert() must be called first or Body() will hang. 
// matcher is a predicate over message text with a printable description
// (used in failure output).
type matcher interface {
	Match(string) bool
	String() string
}

// matchKeywords builds a matcher that requires every keyword to appear as
// a substring, case-insensitively.
func matchKeywords(keywords []string) matcher {
	lowered := make([]string, len(keywords))
	for idx := range keywords {
		lowered[idx] = strings.ToLower(keywords[idx])
	}
	return keywordMatcher(lowered)
}

// joinMatch combines two matchers with either AND or OR semantics.
type joinMatch struct {
	a, b matcher
	and  bool
}

// matchOR returns a matcher passing when either input passes. A noneMatch
// on the left collapses to just the right-hand matcher.
func matchOR(a, b matcher) matcher {
	if _, isNone := a.(noneMatch); isNone {
		return b
	}
	return &joinMatch{a: a, b: b}
}

// matchAND returns a matcher passing only when both inputs pass. An empty
// keyword matcher on the left collapses to just the right-hand matcher.
func matchAND(a, b matcher) matcher {
	if kw, isKW := a.(keywordMatcher); isKW && len(kw) == 0 {
		return b
	}
	return &joinMatch{a: a, b: b, and: true}
}

func (j *joinMatch) Match(msg string) bool {
	if j.and {
		return j.a.Match(msg) && j.b.Match(msg)
	}
	return j.a.Match(msg) || j.b.Match(msg)
}

func (j *joinMatch) String() string {
	op := " OR "
	if j.and {
		op = " AND "
	}
	return j.a.String() + op + j.b.String()
}

// noneMatch matches nothing; it is the identity element for matchOR.
type noneMatch struct{}

func (noneMatch) Match(string) bool { return false }
func (noneMatch) String() string    { return "" }

// keywordMatcher holds pre-lowercased keywords that must all be present.
type keywordMatcher []string

func (k keywordMatcher) String() string {
	return strings.Join(k, ",")
}

func (k keywordMatcher) Match(msg string) bool {
	lower := strings.ToLower(msg)
	for _, kw := range k {
		if !strings.Contains(lower, kw) {
			return false
		}
	}
	return true
}
// IgnoreUnexpectedSMS ORs the keyword set into the device's SMS ignore
// filter; extra messages matching ALL keywords will not fail the test.
func (dev *twDevice) IgnoreUnexpectedSMS(keywords ...string) {
	dev.ignoreMessages = matchOR(dev.ignoreMessages, matchKeywords(keywords))
}

// IgnoreUnexpectedVoice is the voice-call counterpart of IgnoreUnexpectedSMS.
func (dev *twDevice) IgnoreUnexpectedVoice(keywords ...string) {
	dev.ignoreCalls = matchOR(dev.ignoreCalls, matchKeywords(keywords))
}

// ExpectSMS registers the requirement that one additional SMS matching
// ALL keywords (case-insensitive) be received by this device.
func (dev *twDevice) ExpectSMS(keywords ...string) TwilioExpectedMessage {
	msg := &expMessage{
		dev:     dev,
		matcher: matchKeywords(keywords),
		// Buffered(1): processSMS publishes the body exactly once
		// without blocking; Body() drains and re-fills it.
		body: make(chan string, 1),
	}
	dev.expMessages = append(dev.expMessages, msg)
	return msg
}

// ThenReply arranges for an SMS with the given body to be sent back when
// the expected message arrives.
func (msg *expMessage) ThenReply(body string) {
	msg.reply = body
}

// RespondWithFailed marks the expected message so delivery is reported to
// the backend as failed rather than delivered.
func (msg *expMessage) RespondWithFailed() {
	msg.fail = true
}

// Body blocks until the expected SMS has been processed, then returns its
// text. The value is pushed back onto the buffered channel so Body may be
// called any number of times.
func (msg *expMessage) Body() string {
	b := <-msg.body
	msg.body <- b
	return b
}
call.dev.expCalls = call.dev.expCalls[:len(call.dev.expCalls)-1] + return call.next +} + +func (tw *twServer) Device(number string) TwilioDevice { + dev := tw.devices[number] + if dev != nil { + return dev + } + dev = &twDevice{ + tw: tw, + number: number, + ignoreMessages: noneMatch{}, + ignoreCalls: noneMatch{}, + } + tw.devices[number] = dev + return dev +} + +// Twilio will return the mock Twilio API. It is safe to call multiple times. +func (h *Harness) Twilio() TwilioServer { + return h.tw +} +func (tw *twServer) timeoutFail() { + tw.t.Helper() + for num, dev := range tw.devices { + for _, msg := range dev.expMessages { + if msg == nil { + continue + } + tw.t.Errorf("Twilio: Did not receive SMS to %s containing: %s", num, msg.matcher.String()) + } + for _, call := range dev.expCalls { + if call == nil { + continue + } + tw.t.Errorf("Twilio: Did not receive voice call to %s (step #%d) containing: %s", num, call.step, call.matcher.String()) + } + } + tw.t.Error("Twilio: Timeout after 15 seconds waiting for one or more expected calls/messages.") +} +func (dev *twDevice) processSMS(sms *mocktwilio.SMS) { + dev.tw.t.Helper() + + for i, msg := range dev.expMessages { + if msg == nil { + continue + } + if !msg.Match(sms.Body()) { + continue + } + + dev.tw.t.Logf("Twilio: Received expected SMS to %s: %s", sms.To(), sms.Body()) + + // matches -- process it + if msg.fail { + sms.Reject() + } else { + sms.Accept() + } + + if msg.reply != "" { + dev.tw.SendSMS(sms.To(), sms.From(), msg.reply) + } + + msg.body <- sms.Body() + dev.expMessages[i] = nil + return + } + + if dev.ignoreMessages.Match(sms.Body()) { + // ignored + return + } + + // didn't match anything + dev.tw.unexpectedSMS(sms) +} + +func (dev *twDevice) processCall(vc *mocktwilio.VoiceCall) { + dev.tw.t.Helper() + if vc.Message() == "" { + dev.tw.t.Logf("Twilio: Received voice call to %s, asking for message.", vc.To()) + vc.Accept() + return + } + + for i, call := range dev.expCalls { + if call == nil { + 
// WaitAndAssert pumps SMS, voice-call, and error events from the mock
// Twilio server until every device's expectations are satisfied, or fails
// the test after a 15-second quiet period. The timer is reset after each
// processed event, so the timeout is per-event, not overall.
func (tw *twServer) WaitAndAssert() {
	tw.t.Helper()
	timeout := 15 * time.Second
	t := time.NewTimer(timeout)
	defer t.Stop()

	for {
		// Stop once no device has outstanding expectations.
		var waiting bool
		for _, dev := range tw.devices {
			if !dev.done() {
				waiting = true
				break
			}
		}
		if !waiting {
			break
		}

		select {
		case <-t.C:
			// test timed out
			tw.timeoutFail()
			return
		case sms := <-tw.SMS():
			// Route to the device registered for the destination number,
			// or report it as unexpected.
			dev := tw.devices[sms.To()]
			if dev == nil {
				tw.unexpectedSMS(sms)
			} else {
				dev.processSMS(sms)
			}
		case vc := <-tw.VoiceCalls():
			dev := tw.devices[vc.To()]
			if dev == nil {
				tw.unexpectedCall(vc)
			} else {
				dev.processCall(vc)
			}
		case err := <-tw.Server.Errors():
			tw.t.Errorf("Twilio: %v", err)
		}
		// NOTE(review): Reset is called on a timer that may not have been
		// drained; fine here since only the timeout case returns — confirm
		// if this loop is ever changed to continue past a timeout.
		t.Reset(timeout)
	}
}
+1,75 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "net/http" + "net/url" + "testing" + "time" +) + +func TestHeartbeat(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm1"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into integration_keys (id, type, name, service_id) + values + ({{uuid "int_key"}}, 'generic', 'my key', {{uuid "sid"}}); + + insert into heartbeat_monitors (id, name, service_id, heartbeat_interval) + values + ({{uuid "hb_key"}}, 'test', {{uuid "sid"}}, '5 minutes'); +` + h := harness.NewHarness(t, sql, "heartbeat-auth-log-data") + defer h.Close() + + heartbeat := func() { + v := make(url.Values) + v.Set("integrationKey", h.UUID("int_key")) + resp, err := http.PostForm(h.URL()+"/v1/api/heartbeat/"+h.UUID("hb_key"), v) + if err != nil { + t.Fatal("post to generic endpoint failed:", err) + } else if resp.StatusCode/100 != 2 { + t.Error("non-2xx response:", resp.Status) + } + resp.Body.Close() + } + + heartbeat() + h.FastForward(5 * time.Minute) // expire heartbeat + h.Twilio().Device(h.Phone("1")).ExpectSMS("heartbeat") + h.Twilio().WaitAndAssert() + heartbeat() + h.FastForward(time.Minute) + h.Delay(15 * time.Second) 
+ // no more SMS +} diff --git a/smoketest/inprogress_test.go b/smoketest/inprogress_test.go new file mode 100644 index 0000000000..cae3d8f34a --- /dev/null +++ b/smoketest/inprogress_test.go @@ -0,0 +1,113 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestInProgress ensures that sent and in-progress notifications for triggered alerts are honored through the migration. +func TestInProgress(t *testing.T) { + t.Parallel() + sql := ` + insert into users (id, name, email) + values + ({{uuid "u1"}}, 'bob', 'joe'), + ({{uuid "u2"}}, 'ben', 'josh'), + ({{uuid "u3"}}, 'beth', 'jake'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "c1"}}, {{uuid "u1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "c1_2"}}, {{uuid "u1"}}, 'personal', 'VOICE', {{phone "1"}}), + ({{uuid "c2"}}, {{uuid "u2"}}, 'personal', 'SMS', {{phone "2"}}), + ({{uuid "c2_2"}}, {{uuid "u2"}}, 'personal', 'VOICE', {{phone "2"}}), + ({{uuid "c3"}}, {{uuid "u3"}}, 'personal', 'SMS', {{phone "3"}}); + + insert into user_notification_rules (id, user_id, contact_method_id, delay_minutes) + values + ({{uuid ""}},{{uuid "u1"}}, {{uuid "c1"}}, 0), + ({{uuid ""}},{{uuid "u2"}}, {{uuid "c2"}}, 0), + ({{uuid ""}},{{uuid "u3"}}, {{uuid "c3"}}, 0), + ({{uuid ""}},{{uuid "u1"}}, {{uuid "c1"}}, 1), + ({{uuid ""}},{{uuid "u2"}}, {{uuid "c2"}}, 1), + ({{uuid ""}},{{uuid "u2"}}, {{uuid "c2_2"}}, 1), + ({{uuid ""}},{{uuid "u3"}}, {{uuid "c3"}}, 1); + + insert into escalation_policies (id, name, repeat) + values + ({{uuid "eid"}}, 'esc policy', -1); + insert into escalation_policy_steps (id, escalation_policy_id, delay) + values + ({{uuid "esid1"}}, {{uuid "eid"}}, 60), + ({{uuid "esid2"}}, {{uuid "eid"}}, 60), + ({{uuid "esid3"}}, {{uuid "eid"}}, 60); + + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid1"}}, {{uuid "u1"}}), + ({{uuid "esid1"}}, {{uuid "u2"}}), + 
({{uuid "esid1"}}, {{uuid "u3"}}), + ({{uuid "esid2"}}, {{uuid "u2"}}), + ({{uuid "esid3"}}, {{uuid "u3"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, summary) + values + ({{uuid "sid"}}, 'testing1'), + ({{uuid "sid"}}, 'testing2'); + + insert into escalation_policy_state (alert_id, escalation_policy_id, escalation_policy_step_id, service_id) + values + (1, {{uuid "eid"}}, {{uuid "esid1"}}, {{uuid "sid"}}); + + insert into notification_policy_cycles (alert_id, user_id, last_tick) + values + (1, {{uuid "u1"}}, null), + (1, {{uuid "u2"}}, now() + '5 minutes'::interval), + (1, {{uuid "u3"}}, now() + '1 second'::interval); +` + h := harness.NewHarness(t, sql, "UserFavorites") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + d2 := tw.Device(h.Phone("2")) + d3 := tw.Device(h.Phone("3")) + + d1.ExpectSMS("testing1") + d1.ExpectSMS("testing2") + + d2.ExpectSMS("testing2") + + d3.ExpectSMS("testing2") + + tw.WaitAndAssert() + + h.FastForward(time.Minute) + + d1.ExpectSMS("testing1") + d1.ExpectSMS("testing2") + + d2.ExpectSMS("testing2") + d2.ExpectVoice("testing2") + + d3.ExpectSMS("testing1") + d3.ExpectSMS("testing2") + + tw.WaitAndAssert() + + h.Escalate(1, 0) + + d2.ExpectSMS("testing1") + tw.WaitAndAssert() + + h.FastForward(time.Minute) + + d2.ExpectSMS("testing1") + d2.ExpectVoice("testing1") + tw.WaitAndAssert() +} diff --git a/smoketest/listalerts_test.go b/smoketest/listalerts_test.go new file mode 100644 index 0000000000..040adc0146 --- /dev/null +++ b/smoketest/listalerts_test.go @@ -0,0 +1,63 @@ +package smoketest + +import ( + "encoding/json" + "github.com/target/goalert/smoketest/harness" + "strconv" + "testing" +) + +func TestListAlerts(t *testing.T) { + t.Parallel() + + sql := ` + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); +` + for i := 0; i < 160; i++ { + name := "s" + 
strconv.Itoa(i) + sql += ` + insert into services (id, name, escalation_policy_id) values ({{uuid "` + name + `"}}, '` + name + `', {{uuid "eid"}}); + insert into alerts (service_id, description) values ({{uuid "` + name + `"}}, 'hi'); + ` + } + + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + resp := h.GraphQLQuery(` + query { + alerts { + id: _id + service_id + service {id, name} + } + } + `) + for _, err := range resp.Errors { + t.Error("fetch alerts:", err) + } + + var res struct { + Alerts []struct { + ID int + Service struct{ Name string } + } + } + + err := json.Unmarshal(resp.Data, &res) + if err != nil { + t.Fatal("failed to parse response:", err) + } + + if len(res.Alerts) == 0 { + t.Error("got 0 alerts; expected at least 1") + } + for _, a := range res.Alerts { + name := "s" + strconv.Itoa(a.ID-1) + if a.Service.Name != name { + t.Errorf("Alert[%d].Service.Name = %s; want %s", a.ID, a.Service.Name, name) + } + } +} diff --git a/smoketest/manualescalationnotification_test.go b/smoketest/manualescalationnotification_test.go new file mode 100644 index 0000000000..2e03fd1b60 --- /dev/null +++ b/smoketest/manualescalationnotification_test.go @@ -0,0 +1,60 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestManualEscalation ensures that second step notifications are sent out when an acknowledged alert is manually escalated. +// When an acknowledged alert is manually escalated, it should escalate and go back to the 'unacknowleged' state. +// TestManualEscalation should create an alert in the acknowledged/active state, with a 2+ step EP, then trigger an escalation. 
Assert that the second step notifications are sent + +func TestManualEscalation(t *testing.T) { + t.Parallel() + sql := ` + insert into users (id, name, email) + values + ({{uuid "uid"}}, 'bob', 'joe'), + ({{uuid "uid2"}}, 'jane', 'xyz'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "c1"}}, {{uuid "uid"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "c2"}}, {{uuid "uid2"}}, 'personal', 'SMS', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "uid"}}, {{uuid "c1"}}, 0), + ({{uuid "uid2"}}, {{uuid "c2"}}, 0); + + insert into escalation_policies (id, name, repeat) + values + ({{uuid "eid"}}, 'esc policy', -1); + insert into escalation_policy_steps (id, escalation_policy_id, delay) + values + ({{uuid "esid1"}}, {{uuid "eid"}}, 60), + ({{uuid "esid2"}}, {{uuid "eid"}}, 60); + + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid1"}}, {{uuid "uid"}}), + ({{uuid "esid2"}}, {{uuid "uid2"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description, status) + values + ({{uuid "sid"}}, 'testing', 'active'); +` + + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + h.Delay(time.Second * 15) // ensure first notification is not sent out + h.Twilio().WaitAndAssert() // phone 2 should not get SMS before escalating + h.Escalate(1, 0) + + h.Twilio().Device(h.Phone("2")).ExpectSMS("testing") +} diff --git a/smoketest/migrations_test.go b/smoketest/migrations_test.go new file mode 100644 index 0000000000..7a8f0061ea --- /dev/null +++ b/smoketest/migrations_test.go @@ -0,0 +1,678 @@ +package smoketest + +import ( + "bufio" + "bytes" + "context" + "database/sql" + "fmt" + "github.com/target/goalert/migrate" + "github.com/target/goalert/smoketest/harness" + "math/rand" + "os" + "os/exec" + "regexp" + 
"sort" + "strings" + "testing" + "text/template" + "time" + + "github.com/lib/pq" + uuid "github.com/satori/go.uuid" +) + +type ignoreRule struct { + MigrationName string + TableName string + ColumnName string + ExtraRows bool + MissingRows bool +} + +var ignoreRules = []ignoreRule{ + // All migration timestamps will differ as they applied/re-applied + {TableName: "gorp_migrations", ColumnName: "applied_at"}, + + // id will be regenerated each time the table is created + {MigrationName: "ev3-assign-schedule-rotations", TableName: "assignments", ColumnName: "src_schedule_rule_id"}, + + // id will be regenerated each time the table is created + {MigrationName: "ev3-assign-schedule-rotations", TableName: "schedule_rules", ColumnName: "id"}, + + // id will be regenerated each time the table is created + {MigrationName: "ev3-assign-escalation-policy-steps", TableName: "escalation_policy_actions", ColumnName: "id"}, + + // client_id will be regenerated each time + {MigrationName: "ev3-notification-policy", TableName: "notification_policy_rule_state", ColumnName: "client_id"}, + + // rotation names were never used before, and was always "Default Rotation". Since they now need to be unique + // the migration uses the schedule name as a prefix, and throws away the old name. 
+ {MigrationName: "ev3-assign-schedule-rotations", TableName: "rotations", ColumnName: "name"}, + + // rules always get recreated + {MigrationName: "ev3-assign-schedule-rotations", TableName: "schedule_rules", ColumnName: "created_at"}, + + // actions get recreated when migrating down + {MigrationName: "ev3-assign-alert", TableName: "escalation_policy_actions", ColumnName: "id"}, + + // process timestamp will change + {MigrationName: "twilio-sms-multiple-callbacks", TableName: "notification_policy_cycles", ColumnName: "last_tick"}, + {MigrationName: "ncycle-tick", TableName: "notification_policy_cycles", ColumnName: "last_tick"}, + + // migrate down should not end cycles once started + {MigrationName: "update-existing-escalations", TableName: "notification_policy_cycles", ExtraRows: true}, + + // migrate down should not clear on-call data + {MigrationName: "update-existing-escalations", TableName: "ep_step_on_call_users", ExtraRows: true}, + + // Old tables, data is safe to drop + {MigrationName: "drop-alert-escalation-policy-snapshots", TableName: "alert_escalation_policy_snapshots", MissingRows: true}, + {MigrationName: "drop-notification-logs", TableName: "notification_logs", MissingRows: true}, + {MigrationName: "drop-sent-notifications", TableName: "sent_notifications", MissingRows: true}, + {MigrationName: "drop-user-notification-cycles", TableName: "user_notification_cycles", MissingRows: true}, + + // Timestamp will change + {MigrationName: "drop-throttle", TableName: "throttle", ColumnName: "last_action_time"}, + + // End times are truncated to the minute + {MigrationName: "schedule-rule-endtime-fix", TableName: "schedule_rules", ColumnName: "end_time"}, +} + +const migrateInitData = ` + +insert into users (id, bio, email, role, name) +values + ({{uuid "u1"}}, {{text 20}}, {{text 8}}, 'admin', {{text 10}}), + ({{uuid "u2"}}, {{text 20}}, {{text 8}}, 'admin', {{text 10}}); + +insert into schedules (id, name, description, time_zone) +values + ({{uuid 
"sched1"}}, {{text 10}}, {{text 20}}, 'America/Chicago'), + ({{uuid "sched2"}}, {{text 10}}, {{text 20}}, 'America/Chicago'); + +insert into rotations (id, schedule_id, name, description, type) +values + ({{uuid "rot1"}}, {{uuid "sched1"}}, {{text 10}}, {{text 20}}, 'daily'), + ({{uuid "rot2"}}, {{uuid "sched2"}}, {{text 10}}, {{text 20}}, 'daily'); + + +insert into rotation_participants (id, rotation_id, position, user_id) +values + ({{uuid "rp1"}}, {{uuid "rot1"}}, 0, {{uuid "u1"}}), + ({{uuid "rp2"}}, {{uuid "rot1"}}, 1, {{uuid "u2"}}); + +insert into escalation_policies (id, name, description, repeat) +values + ({{uuid "e1"}}, {{text 10}}, {{text 20}}, 1), + ({{uuid "e2"}}, {{text 10}}, {{text 20}}, 0); + +insert into escalation_policy_steps (id, delay, step_number, escalation_policy_id) +values + ({{uuid "es1"}}, 1, 0, {{uuid "e1"}}), + ({{uuid "es2"}}, 1, 1, {{uuid "e1"}}), + ({{uuid "es3"}}, 1, 2, {{uuid "e1"}}), + ({{uuid "es4"}}, 1, 0, {{uuid "e2"}}); + +insert into escalation_policy_actions (id, escalation_policy_step_id, user_id, schedule_id) +values + ({{uuid "epa1"}}, {{uuid "es1"}}, {{uuid "u1"}}, NULL), + ({{uuid "epa2"}}, {{uuid "es1"}}, NULL, {{uuid "sched1"}}); + +insert into services (id, name, description, escalation_policy_id) +values + ({{uuid "s1"}}, {{text 10}}, {{text 20}}, {{uuid "e1"}}), + ({{uuid "s2"}}, {{text 10}}, {{text 20}}, {{uuid "e1"}}), + ({{uuid "s3"}}, {{text 10}}, {{text 20}}, {{uuid "e2"}}); + +insert into alerts (description, service_id) +values + ({{text 30}}, {{uuid "s1"}}), + ({{text 30}}, {{uuid "s1"}}), + ({{text 30}}, {{uuid "s2"}}), + ({{text 30}}, {{uuid "s2"}}), + ({{text 30}}, {{uuid "s1"}}), + ({{text 30}}, {{uuid "s3"}}); + +insert into user_contact_methods (id, user_id, name, type, value) +values + ({{uuid "c1"}}, {{uuid "u1"}}, {{text 8}}, 'SMS', {{phone "1"}}), + ({{uuid "c2"}}, {{uuid "u1"}}, {{text 8}}, 'VOICE', {{phone "1"}}), + ({{uuid "c3"}}, {{uuid "u2"}}, {{text 8}}, 'SMS', {{phone "2"}}); + +insert 
into user_notification_rules (id, user_id, contact_method_id, delay_minutes) +values + ({{uuid "n1"}}, {{uuid "u1"}}, {{uuid "c1"}}, 0), + ({{uuid "n2"}}, {{uuid "u1"}}, {{uuid "c2"}}, 1); + +insert into user_notification_cycles (id, user_id, alert_id, escalation_level, started_at) +values + ({{uuid "ncy1"}}, {{uuid "u1"}}, 1, 0, now()); + +insert into sent_notifications (id, cycle_id, alert_id, contact_method_id, notification_rule_id, sent_at) +values + ({{uuid "cb1"}}, {{uuid "ncy1"}}, 1, {{uuid "c1"}}, {{uuid "n1"}}, now()), + ({{uuid "cb2"}}, {{uuid "ncy1"}}, 1, {{uuid "c2"}}, {{uuid "n2"}}, now()); +` + +type pgDumpEntry struct { + Name string + Body string +} + +var enumRx = regexp.MustCompile(`(?s)CREATE TYPE ([\w_.]+) AS ENUM \(\s*(.*)\s*\);`) + +// enumOK handles checking for safe enum differences. This case is that migrate +// up can add, but migrate down will not remove new enum values. +// +// migrate down can't safely remove enum values, but it's safe for new ones +// to exist. So we simply check that all original items exist. 
+func enumOK(got, want string) bool { + partsW := enumRx.FindStringSubmatch(want) + if len(partsW) != 3 { + return false + } + partsG := enumRx.FindStringSubmatch(got) + if len(partsG) != 3 { + return false + } + if partsW[1] != partsG[1] { + return false + } + + gotItems := strings.Split(partsG[2], ",\n") + wantItems := strings.Split(partsW[2], ",\n") + + g := make(map[string]bool, len(gotItems)) + for _, v := range gotItems { + g[strings.TrimSpace(v)] = true + } + + for _, v := range wantItems { + if !g[strings.TrimSpace(v)] { + return false + } + } + + return true +} +func TestEnumOK(t *testing.T) { + const got = `CREATE TYPE enum_alert_log_event AS ENUM ( +'created', +'reopened', +'status_changed', +'assignment_changed', +'escalated', +'closed', +'notification_sent', +'response_received', +'acknowledged', +'policy_updated', +'duplicate_suppressed', +'escalation_request' +);` + const want = `CREATE TYPE enum_alert_log_event AS ENUM ( +'created', +'reopened', +'status_changed', +'assignment_changed', +'escalated', +'closed', +'notification_sent', +'response_received' +);` + + if !enumOK(got, want) { + t.Errorf("got false; want true") + } +} + +func processIgnoreRules(ignoreRules []ignoreRule, name, body string) string { + for _, r := range ignoreRules { + if r.MigrationName != "" && r.MigrationName != name { + continue + } + if !strings.HasPrefix(body, "COPY "+r.TableName+" ") && !strings.HasPrefix(body, "COPY public."+r.TableName+" ") { + continue + } + lines := strings.Split(body, "\n") + pref, cols, suf := getCols(lines[0]) + var index = -1 + for i, v := range cols { + if v == r.ColumnName { + index = i + } + } + if index == -1 { + continue + } + newLen := len(cols) - 1 + copy(cols[index:], cols[index+1:]) + cols = cols[:newLen] + lines[0] = pref + strings.Join(cols, ", ") + suf + + data := lines[1 : len(lines)-1] + for i, l := range data { + cols = strings.Split(l, "\t") + copy(cols[index:], cols[index+1:]) + cols = cols[:newLen] + data[i] = 
// getCols splits a pg_dump COPY header line of the form
// "COPY table (col1, col2, ...) FROM stdin;" into the text up to and
// including the opening paren, the column names, and the text from the
// closing paren onward. It assumes the line contains a "(...)" pair and
// will panic otherwise, matching the original behavior.
func getCols(line string) (prefix string, cols []string, suffix string) {
	openParts := strings.SplitN(line, "(", 2)
	prefix = openParts[0] + "("

	closeParts := strings.SplitN(openParts[1], ")", 2)
	suffix = ")" + closeParts[1]

	cols = strings.Split(closeParts[0], ", ")
	return prefix, cols, suffix
}
+first fourth fifth second third` + result := alphabetizeCopy(input) + if result != expected { + t.Errorf("got\n%s\n\nwant\n%s", result, expected) + } +} + +func parsePGDump(data []byte, name string) []pgDumpEntry { + rd := bufio.NewReader(bytes.NewReader(data)) + + entries := make([]pgDumpEntry, 0, 10000) + var entry pgDumpEntry + + addEntry := func() { + if strings.Contains(entry.Body, "COPY notifications (user_id, started_at) FROM stdin") { + // we ignore the (old) notifications table + // since it's trigger based and always re-calculated + // + // which makes it near impossible to test migrations + // + // it also doesn't work properly anyhow, which is why it has been + // replaced. + return + } + + entry.Body = strings.TrimSpace(entry.Body) + entry.Name = strings.TrimSpace(entry.Name) + + if strings.HasPrefix(entry.Body, "COPY ") && strings.Contains(entry.Name, "Type: TABLE DATA") { + // ignore column order, as long as the data matches + entry.Body = processIgnoreRules(ignoreRules, name, entry.Body) + entry.Body = alphabetizeCopy(entry.Body) + } + if strings.Contains(entry.Body, "REPLICA IDENTITY NOTHING") && strings.Contains(entry.Body, "ALTER TABLE ONLY") { + // skip 'view' tables + return + } + + if strings.Contains(entry.Name, " _RETURN; Type: RULE") { + // view return rule -> convert to view + tname := strings.SplitN(entry.Name, " ", 2)[0] + entry.Name = strings.Replace(entry.Name, " _RETURN; Type: RULE", "; Type: VIEW", 1) + entry.Body = strings.Replace(entry.Body, "CREATE RULE \"_RETURN\" AS\n", "", 1) + entry.Body = strings.Replace(entry.Body, + "ON SELECT TO "+tname+" DO INSTEAD ", + "CREATE VIEW "+tname+" AS\n", + 1, + ) + } + + if strings.HasPrefix(entry.Body, "CREATE TABLE") { + // order args alphabetically + lines := strings.Split(entry.Body, "\n") + sort.Strings(lines[1 : len(lines)-1]) + for i := 1; i < len(lines)-1; i++ { + if !strings.HasSuffix(lines[i], ",") { + lines[i] += "," + } + } + entry.Body = strings.Join(lines, "\n") + } + + entries 
= append(entries, entry) + } + + for { + line, err := rd.ReadString('\n') + if err != nil { + break + } + if strings.HasPrefix(line, "-- Name: ") { + entry.Name = strings.TrimSpace(strings.TrimPrefix(line, "-- Name: ")) + entry.Body = "" + rd.ReadString('\n') // skip next line + continue + } else if strings.HasPrefix(line, "-- Data for Name: ") { + entry.Name = strings.TrimSpace(strings.TrimPrefix(line, "-- Data for Name: ")) + entry.Body = "" + rd.ReadString('\n') // skip next line + continue + } else if strings.HasPrefix(line, "--") { + if entry.Name != "" { + addEntry() + } + entry.Body = "" + entry.Name = "" + } else if entry.Name != "" && line != "" { + entry.Body += strings.Trim(line, "\n ") + "\n" + } + } + + return entries +} +func indent(str string) string { + return " " + strings.Replace(str, "\n", "\n ", -1) +} + +// https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-golang +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} + +func renderQuery(t *testing.T, sql string) string { + tmpl := template.New("sql") + uuidG := harness.NewDataGen(t, "UUID", harness.DataGenFunc(harness.GenUUID)) + phoneG := harness.NewDataGen(t, "Phone", harness.DataGenFunc(harness.GenPhone)) + phoneCCG := harness.NewDataGen(t, "PhoneCC", harness.DataGenArgFunc(harness.GenPhoneCC)) + strs := make(map[string]bool) + tmpl.Funcs(template.FuncMap{ + "uuid": func(id string) string { return fmt.Sprintf("'%s'", uuidG.Get(id)) }, + "phone": func(id string) string { return fmt.Sprintf("'%s'", phoneG.Get(id)) }, + "phoneCC": func(cc, id string) string { return fmt.Sprintf("'%s'", phoneCCG.GetWithArg(cc, id)) }, + "text": func(n int) string { + val := randStringRunes(n) + for strs[val] { + val = randStringRunes(n) + } + strs[val] = true + return 
// matchesBody reports whether body is an acceptable rendering of this
// dump entry for the given migration: exact match, an acceptable enum
// superset (see enumOK), or — for COPY data blocks only — a match modulo
// trailing rows permitted by an ExtraRows/MissingRows ignore rule.
// Note: e is a value receiver, so trimming e.Body below mutates only the
// local copy.
func (e pgDumpEntry) matchesBody(migrationName string, body string) bool {
	if e.Body == body {
		return true
	}
	if enumOK(body, e.Body) {
		return true
	}

	// Row-tolerance rules only apply to table data (COPY) entries.
	if !strings.HasPrefix(e.Body, "COPY ") {
		return false
	}

	// check for extra rows rule
	var extraRows, missingRows bool
	for _, r := range ignoreRules {
		if r.MigrationName != migrationName {
			continue
		}
		if !strings.HasPrefix(e.Name, r.TableName+";") {
			continue
		}
		extraRows = extraRows || r.ExtraRows
		missingRows = missingRows || r.MissingRows
	}
	if !extraRows && !missingRows {
		return false
	}

	// Drop the COPY terminator so row-by-row prefix comparison works.
	e.Body = strings.TrimSuffix(e.Body, "\n\\.")
	body = strings.TrimSuffix(body, "\n\\.")
	if extraRows {
		// Accept if e.Body equals body with zero or more trailing rows removed.
		rows := strings.Split(body, "\n")
		for i := range rows {
			if e.Body == strings.Join(rows[:len(rows)-i], "\n") {
				return true
			}
		}
	}

	if missingRows {
		// Accept if body equals e.Body with zero or more trailing rows removed.
		rows := strings.Split(e.Body, "\n")
		for i := range rows {
			if body == strings.Join(rows[:len(rows)-i], "\n") {
				return true
			}
		}
	}

	return false
}
+ db, err = sql.Open("postgres", harness.DBURL(dbName)) + if err != nil { + t.Fatal("failed to open created db:", err) + } + defer db.Close() + + n, err := migrate.Up(context.Background(), db, start) + if err != nil { + t.Fatal("failed to apply initial migrations:", err) + } + + initSQL := renderQuery(t, migrateInitData) + + data, err := exec.Command("psql", + "-d", harness.DBURL(dbName), + "-c", initSQL, + ).CombinedOutput() + if err != nil { + t.Fatalf("failed to init db (%v):\n%s", err, string(data)) + } + + names := migrate.Names() + env, _ := os.LookupEnv("SKIP_TO") + if env != "" { + start = env + } + var idx int + for idx = range names { + if names[idx+1] == start { + break + } + } + + names = names[idx:] + if env, ok := os.LookupEnv("SKIP_TO"); ok && env != "" { + n, err := migrate.Up(context.Background(), db, env) + if err != nil { + t.Fatal("failed to apply skip migrations:", err) + } + if n == 0 { + t.Fatal("SKIP_TO already applied") + } + t.Logf("Skipping to %s", start) + } + + snapshot := func(t *testing.T, name string) []pgDumpEntry { + data, err := exec.Command("pg_dump", + "-d", harness.DBURL(dbName), + "-O", + ).Output() + if err != nil { + t.Fatal("failed to dump db:", err) + } + return parsePGDump(data, name) + } + mm := 0 + checkDiff := func(t *testing.T, typ, migrationName string, a, b []pgDumpEntry) bool { + m1 := make(map[string]string) + m2 := make(map[string]string) + for _, e := range a { + m1[e.Name] = e.Body + } + for _, e := range b { + m2[e.Name] = e.Body + } + var mismatch bool + for _, e := range a { + body, ok := m2[e.Name] + if !ok { + mismatch = true + t.Errorf("%s missing\n%s\n%s", typ, e.Name, indent(e.Body)) + continue + } + if !e.matchesBody(migrationName, body) { + mismatch = true + t.Errorf("%s mismatch\n%s\ngot\n%s\nwant\n%s", typ, e.Name, indent(body), indent(e.Body)) + continue + } + } + for _, e := range b { + _, ok := m1[e.Name] + if !ok { + mismatch = true + t.Errorf("%s leftover\n%s\n%s", typ, e.Name, indent(e.Body)) 
+ } + } + + mm++ + return mismatch + } + for i, migrationName := range names[1:] { + lastMigrationName := names[i] + var applied bool + pass := t.Run(migrationName, func(t *testing.T) { + ctx := context.Background() + + orig := snapshot(t, migrationName) + n, err = migrate.Up(ctx, db, migrationName) + if err != nil { + t.Fatalf("failed to apply UP migration: %v", err) + } + if n == 0 { + return + } + applied = true + upSnap := snapshot(t, migrationName) + _, err = migrate.Down(ctx, db, lastMigrationName) + if err != nil { + t.Fatalf("failed to apply DOWN migration: %v", err) + } + applied = false + s := snapshot(t, migrationName) + if checkDiff(t, "DOWN", migrationName, orig, s) { + t.Fatalf("DOWN migration did not restore previous schema") + } + + _, err = migrate.Up(ctx, db, migrationName) + if err != nil { + t.Fatalf("failed to apply UP migration (2nd time): %v", err) + } + applied = true + s = snapshot(t, migrationName) + if checkDiff(t, "UP", migrationName, upSnap, s) { + t.Fatalf("UP migration did not restore previous schema") + } + }) + if !pass && !applied { + n, err = migrate.Up(context.Background(), db, migrationName) + if err != nil || n == 0 { + t.Fatalf("failed to apply UP migration; abort") + } + } + } +} diff --git a/smoketest/missinguser_test.go b/smoketest/missinguser_test.go new file mode 100644 index 0000000000..de08bfb37e --- /dev/null +++ b/smoketest/missinguser_test.go @@ -0,0 +1,100 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestMissingUser tests that notifications go out, even when data is in an odd state. 
+// +// - escalation policy with no steps +// - escalation policy with steps missing actions +// - policy step with schedule and no users +// - policy step with schedule that starts in the future +func TestMissingUser(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "u1"}}, 'bob', 'joe'), + ({{uuid "u2"}}, 'ben', 'frank'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "c1"}}, {{uuid "u1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "c2"}}, {{uuid "u2"}}, 'personal', 'SMS', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "u1"}}, {{uuid "c1"}}, 0), + ({{uuid "u2"}}, {{uuid "c2"}}, 0); + + insert into schedules (id, name, time_zone) + values + ({{uuid "empty_sched"}}, 'empty', 'America/Chicago'), + ({{uuid "empty_rot_sched"}}, 'empty rot', 'America/Chicago'), + ({{uuid "future_sched"}}, 'future', 'America/Chicago'); + + insert into rotations (id, schedule_id, name, type, start_time, shift_length) + values + ({{uuid ""}}, {{uuid "empty_rot_sched"}}, 'def', 'daily', now() - '1 hour'::interval, 1), + ({{uuid "future_rot"}}, {{uuid "future_sched"}}, 'def', 'daily', now() + '1 hour'::interval, 1); + + insert into rotation_participants (id, rotation_id, position, user_id) + values + ({{uuid ""}}, {{uuid "future_rot"}}, 0, {{uuid "u1"}}); + + insert into escalation_policies (id, name) + values + ({{uuid "empty_policy"}}, 'esc policy'), + ({{uuid "empty_step"}}, 'empty step'), + ({{uuid "empty_sched_pol"}}, 'empty sched'), + ({{uuid "empty_rot_pol"}}, 'empty rot'), + ({{uuid "future_sched_pol"}}, 'future'), + ({{uuid "tech.correct"}}, 'woot'); + + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid ""}}, {{uuid "empty_step"}}), + ({{uuid "empty_sched_step"}}, {{uuid "empty_sched_pol"}}), + ({{uuid "empty_rot_step"}}, {{uuid "empty_rot_pol"}}), + ({{uuid "future_sched_step"}}, 
{{uuid "future_sched_pol"}}), + ({{uuid "tech.correct_step"}}, {{uuid "tech.correct"}}); + + insert into escalation_policy_actions (escalation_policy_step_id, user_id, schedule_id) + values + ({{uuid "empty_sched_step"}}, null, {{uuid "empty_sched"}}), + ({{uuid "empty_rot_step"}}, null, {{uuid "empty_rot_sched"}}), + ({{uuid "future_sched_step"}}, null, {{uuid "future_sched"}}), + ({{uuid "tech.correct_step"}}, null, {{uuid "empty_sched"}}), + ({{uuid "tech.correct_step"}}, {{uuid "u1"}}, null); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "s1"}}, {{uuid "empty_policy"}}, 'service1'), + ({{uuid "s2"}}, {{uuid "empty_step"}}, 'service2'), + ({{uuid "s3"}}, {{uuid "empty_sched_pol"}}, 'service3'), + ({{uuid "s4"}}, {{uuid "future_sched_pol"}}, 'service4'), + ({{uuid "s5"}}, {{uuid "tech.correct"}}, 'service5'), + ({{uuid "s6"}}, {{uuid "empty_rot_pol"}}, 'service6'); + + insert into alerts (service_id, description) + values + ({{uuid "s1"}}, 'emptypol'), + ({{uuid "s2"}}, 'emptystep'), + ({{uuid "s3"}}, 'emptysched'), + ({{uuid "s4"}}, 'futuresched'), + ({{uuid "s5"}}, 'correct'), + ({{uuid "s6"}}, 'emptyrot'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + d := h.Twilio().Device(h.Phone("1")) + h.Escalate(1, 0) + d.ExpectSMS("correct") + + // Rotations will always have someone active, as long as there are 1 or more participants +} diff --git a/smoketest/multistepnotification_test.go b/smoketest/multistepnotification_test.go new file mode 100644 index 0000000000..5b6d68ce19 --- /dev/null +++ b/smoketest/multistepnotification_test.go @@ -0,0 +1,61 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestMultiStepNotifications tests that SMS and Voice goes out for +// 1 alert -> service -> esc -> step -> user. 
with 3 notification rules (1 of each immediately, sms 1 minute later) +func TestMultiStepNotifications(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "u1"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "c1"}}, {{uuid "u1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "c2"}}, {{uuid "u1"}}, 'personal', 'VOICE', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "u1"}}, {{uuid "c1"}}, 0), + ({{uuid "u1"}}, {{uuid "c2"}}, 0), + ({{uuid "u1"}}, {{uuid "c1"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "e1"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "es1"}}, {{uuid "e1"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "es1"}}, {{uuid "u1"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "s1"}}, {{uuid "e1"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "s1"}}, 'testing'); +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + d2 := tw.Device(h.Phone("2")) + + d1.ExpectSMS("testing") + d2.ExpectVoice("testing") + tw.WaitAndAssert() + + h.FastForward(time.Minute) + d1.ExpectSMS("testing") + +} diff --git a/smoketest/multiuser_test.go b/smoketest/multiuser_test.go new file mode 100644 index 0000000000..3a12ab7218 --- /dev/null +++ b/smoketest/multiuser_test.go @@ -0,0 +1,59 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestMultiUser checks that if multiple users are assigned to a policy step, +// they all get their notifications. 
+func TestMultiUser(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "u1"}}, 'bob', 'joe'), + ({{uuid "u2"}}, 'ben', 'josh'), + ({{uuid "u3"}}, 'beth', 'jake'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "c1"}}, {{uuid "u1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "c2"}}, {{uuid "u2"}}, 'personal', 'SMS', {{phone "2"}}), + ({{uuid "c3"}}, {{uuid "u3"}}, 'personal', 'SMS', {{phone "3"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "u1"}}, {{uuid "c1"}}, 0), + ({{uuid "u2"}}, {{uuid "c2"}}, 0), + ({{uuid "u3"}}, {{uuid "c3"}}, 0); + + insert into escalation_policies (id, name, repeat) + values + ({{uuid "eid"}}, 'esc policy', -1); + insert into escalation_policy_steps (id, escalation_policy_id, delay) + values + ({{uuid "esid"}}, {{uuid "eid"}}, 60); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "u1"}}), + ({{uuid "esid"}}, {{uuid "u2"}}), + ({{uuid "esid"}}, {{uuid "u3"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + ` + + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + h.Twilio().Device(h.Phone("1")).ExpectSMS("testing") + h.Twilio().Device(h.Phone("2")).ExpectSMS("testing") + h.Twilio().Device(h.Phone("3")).ExpectSMS("testing") +} diff --git a/smoketest/policyreassignment_test.go b/smoketest/policyreassignment_test.go new file mode 100644 index 0000000000..0e8e0a24c8 --- /dev/null +++ b/smoketest/policyreassignment_test.go @@ -0,0 +1,97 @@ +package smoketest + +import ( + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestPolicyReassignment tests that only the active escalation policy is used for alerts. 
+func TestPolicyReassignment(t *testing.T) { + t.Parallel() + + const sql = ` +insert into users (id, name, email) +values + ({{uuid "user"}}, 'bob', 'joe'), + ({{uuid "user2"}}, 'bob2', 'joe2'); + +insert into user_contact_methods (id, user_id, name, type, value) +values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "user2"}}, 'personal', 'SMS', {{phone "2"}}); + +insert into user_notification_rules (user_id, contact_method_id, delay_minutes) +values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user2"}}, {{uuid "cm2"}}, 0); + +insert into escalation_policies (id, name) +values + ({{uuid "ep1"}}, 'esc policy 1'), + ({{uuid "ep2"}}, 'esc policy 2'); + +insert into escalation_policy_steps (id, escalation_policy_id, delay) +values + ({{uuid "ep1_1"}}, {{uuid "ep1"}}, 1), + ({{uuid "ep1_2"}}, {{uuid "ep1"}}, 60), + ({{uuid "ep2_1"}}, {{uuid "ep2"}}, 60); + +insert into escalation_policy_actions (escalation_policy_step_id, user_id) +values + ({{uuid "ep1_1"}}, {{uuid "user"}}), + ({{uuid "ep1_2"}}, {{uuid "user"}}), + ({{uuid "ep2_1"}}, {{uuid "user2"}}); + +insert into services (id, escalation_policy_id, name) +values + ({{uuid "sid"}}, {{uuid "ep1"}}, 'service'); + +insert into alerts (service_id, description) +values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + d2 := tw.Device(h.Phone("2")) + + d1.ExpectSMS("testing") + tw.WaitAndAssert() + + h.GraphQLQuery(fmt.Sprintf(` + mutation{ + updateService(input:{ + id: "%s" + name: "ok" + escalation_policy_id: "%s" + }) {id} + } + `, h.UUID("sid"), h.UUID("ep2"))) + + d2.ExpectSMS("testing") + tw.WaitAndAssert() + + h.FastForward(time.Minute) + // no new alerts + h.Delay(15 * time.Second) + tw.WaitAndAssert() + + h.GraphQLQuery(fmt.Sprintf(` + mutation{ + updateService(input:{ + id: "%s" + name: "ok" + escalation_policy_id: "%s" + }) {id} + } + `, 
h.UUID("sid"), h.UUID("ep1"))) + + // should get immediate message + d1.ExpectSMS("testing") + tw.WaitAndAssert() +} diff --git a/smoketest/postcyclerules_test.go b/smoketest/postcyclerules_test.go new file mode 100644 index 0000000000..e49bf6dbba --- /dev/null +++ b/smoketest/postcyclerules_test.go @@ -0,0 +1,71 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestPostCycleRules checks that new rules added after the last +// rule of a policy executes are handled the same way as during a policy cycle. +func TestPostCycleRules(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "uid"}}, 'bob', 'joe'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cid"}}, {{uuid "uid"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cid2"}}, {{uuid "uid"}}, 'personal2', 'SMS', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "uid"}}, {{uuid "cid2"}}, 0); + + insert into escalation_policies (id, name, repeat) + values + ({{uuid "eid"}}, 'esc policy', -1); + insert into escalation_policy_steps (id, escalation_policy_id, delay) + values + ({{uuid "esid"}}, {{uuid "eid"}}, 60); + + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "uid"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + +` + + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + d2 := tw.Device(h.Phone("2")) + + d2.ExpectSMS("testing") + tw.WaitAndAssert() + + // ADD RULES + h.AddNotificationRule(h.UUID("uid"), h.UUID("cid"), 0) + h.AddNotificationRule(h.UUID("uid"), h.UUID("cid"), 1) + + h.Delay(15 * time.Second) 
+ // ensure no notification for instant rule + tw.WaitAndAssert() + + h.FastForward(time.Minute) + + d1.ExpectSMS("testing") + tw.WaitAndAssert() +} diff --git a/smoketest/prioritization_test.go b/smoketest/prioritization_test.go new file mode 100644 index 0000000000..7b31da22cd --- /dev/null +++ b/smoketest/prioritization_test.go @@ -0,0 +1,76 @@ +package smoketest + +import ( + "bytes" + "github.com/target/goalert/smoketest/harness" + "strconv" + "testing" +) + +// TestPrioritization tests that notifications for new users/alerts get +// priority over existing ones. +func TestPrioritization(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "u1"}}, 'bob', 'joe'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "u1"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "u1"}}, {{uuid "cm1"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "u1"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "s1"}}, {{uuid "eid"}}, 'service1'), + ({{uuid "s2"}}, {{uuid "eid"}}, 'service2'); +` + + buf := bytes.NewBufferString(` + insert into alerts (service_id, description) + values + `) + for i := 0; i < 30; i++ { + if i > 0 { + buf.WriteString(",\n") + } + buf.WriteString(`({{uuid "s1"}}, 'service-1-alert-` + strconv.Itoa(i) + `')`) + } + buf.WriteString(";") + sql += buf.String() + + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + d1.ExpectSMS("service-1-alert") + d1.ExpectSMS("service-1-alert") + 
d1.IgnoreUnexpectedSMS("service-1-alert") + tw.WaitAndAssert() + + h.CreateAlert(h.UUID("s2"), "service-2-alert") + + d1.ExpectSMS("service-2-alert") + tw.WaitAndAssert() + + d1.ExpectSMS("service-1-alert") + tw.WaitAndAssert() +} diff --git a/smoketest/rotationdaily_test.go b/smoketest/rotationdaily_test.go new file mode 100644 index 0000000000..80735c95bf --- /dev/null +++ b/smoketest/rotationdaily_test.go @@ -0,0 +1,72 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +func TestRotation_Daily(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "uid1"}}, 'bob', 'joe'), + ({{uuid "uid2"}}, 'ben', 'frank'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "uid1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "uid2"}}, 'personal', 'SMS', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "uid1"}}, {{uuid "cm1"}}, 0), + ({{uuid "uid2"}}, {{uuid "cm2"}}, 0); + + insert into escalation_policies (id, name, repeat) + values + ({{uuid "eid"}}, 'esc policy', 1); + + insert into escalation_policy_steps (id, escalation_policy_id, delay) + values + ({{uuid "es1"}}, {{uuid "eid"}}, 60); + + insert into schedules (id, name, time_zone) + values + ({{uuid "sched1"}}, 'default', 'America/Chicago'); + + insert into rotations (id, schedule_id, name, type, start_time, shift_length) + values + ({{uuid "rot1"}}, {{uuid "sched1"}}, 'default rotation', 'daily', now(), 2); + + insert into rotation_participants (rotation_id, user_id, position) + values + ({{uuid "rot1"}}, {{uuid "uid1"}}, 0), + ({{uuid "rot1"}}, {{uuid "uid2"}}, 1); + + insert into escalation_policy_actions (escalation_policy_step_id, schedule_id) + values + ({{uuid "es1"}}, {{uuid "sched1"}}); + + insert into services (id, escalation_policy_id, name) values + ({{uuid 
"sid"}}, {{uuid "eid"}}, 'service'); + ` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + sid := h.UUID("sid") + uid1 := h.UUID("uid1") + uid2 := h.UUID("uid2") + + h.WaitAndAssertOnCallUsers(sid, uid1) + + // Skipping ahead by an extra day to jump over DST changes. + // + // In the spring, one shift will be an hour short. + // In the fall, one shift will be an hour long. + h.FastForward(3 * 24 * time.Hour) + + h.WaitAndAssertOnCallUsers(sid, uid2) +} diff --git a/smoketest/rotationdst_test.go b/smoketest/rotationdst_test.go new file mode 100644 index 0000000000..62f3b57deb --- /dev/null +++ b/smoketest/rotationdst_test.go @@ -0,0 +1,100 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +func getLastDSTDate(t *testing.T, n time.Time, loc *time.Location) time.Time { + const cdt = -18000 + + _, offset := n.Zone() + + if offset == cdt { + for offset == cdt { + n = n.AddDate(0, -1, 0) + _, offset = n.Zone() + } + } else { + for offset != cdt { + n = n.AddDate(0, -1, 0) + _, offset = n.Zone() + } + + } + + return n +} + +// TestRotation_DST checks that schedules handle DST boundaries properly +func TestRotation_DST(t *testing.T) { + t.Parallel() + loc, err := time.LoadLocation("America/Chicago") + if err != nil { + t.Fatalf("could not load 'America/Chicago' tzdata: %v", err) + } + + // for this test, we make a daily rotation at the current time (- 1 minute) across the closest DST boundary + // then make sure the rotation flips after a minute. 
+ // make it easy + + n := time.Now().In(loc) + start := getLastDSTDate(t, n, loc).Add(5 * time.Minute) + startStr := start.Format(time.RFC3339) + sql := ` + insert into users (id, name, email) + values + ({{uuid "uid1"}}, 'bob', 'joe'), + ({{uuid "uid2"}}, 'ben', 'frank'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "uid1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "uid2"}}, 'personal', 'SMS', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "uid1"}}, {{uuid "cm1"}}, 0), + ({{uuid "uid2"}}, {{uuid "cm2"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + + insert into rotations (id, name, type, start_time, shift_length, time_zone) + values + ({{uuid "rot1"}}, 'default rotation', 'daily', '` + startStr + `',1, 'America/Chicago'); + + insert into rotation_participants (rotation_id, user_id, position) + values + ({{uuid "rot1"}}, {{uuid "uid1"}}, 0), + ({{uuid "rot1"}}, {{uuid "uid2"}}, 1); + + insert into escalation_policy_actions (escalation_policy_step_id, rotation_id) + values + ({{uuid "esid"}}, {{uuid "rot1"}}); + + insert into services (id, escalation_policy_id, name) values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) values + ({{uuid "sid"}}, 'testing'); + + ` + h := harness.NewHarness(t, sql, "ev3-rotation-state") + defer h.Close() + + sid := h.UUID("sid") + uid1 := h.UUID("uid1") + uid2 := h.UUID("uid2") + + h.WaitAndAssertOnCallUsers(sid, uid1) + + h.FastForward(10 * time.Minute) + + h.WaitAndAssertOnCallUsers(sid, uid2) +} diff --git a/smoketest/rotationgap_test.go b/smoketest/rotationgap_test.go new file mode 100644 index 0000000000..7fe6dffaf3 --- /dev/null +++ b/smoketest/rotationgap_test.go @@ 
-0,0 +1,64 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +func TestRotationGap(t *testing.T) { + t.Parallel() + + const sql = ` + set timezone = 'America/Chicago'; + + insert into users (id, name, email) + values + ({{uuid "uid1"}}, 'bob', 'joe'), + ({{uuid "uid2"}}, 'ben', 'frank'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "uid1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "uid2"}}, 'personal', 'SMS', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "uid1"}}, {{uuid "cm1"}}, 0), + ({{uuid "uid2"}}, {{uuid "cm2"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + + insert into rotations (id, name, type, start_time, shift_length, time_zone) + values + ({{uuid "rot1"}}, 'default rotation', 'weekly', now(), 1, 'America/Chicago'); + + insert into rotation_participants (rotation_id, user_id, position) + values + ({{uuid "rot1"}}, {{uuid "uid1"}}, 1), + ({{uuid "rot1"}}, {{uuid "uid2"}}, 2); + + insert into escalation_policy_actions (escalation_policy_step_id, rotation_id) + values + ({{uuid "esid"}}, {{uuid "rot1"}}); + + insert into services (id, escalation_policy_id, name) values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) values + ({{uuid "sid"}}, 'testing'); + + ` + + h := harness.NewHarness(t, sql, "add-verification-code") + defer h.Close() + + sid := h.UUID("sid") + uid1 := h.UUID("uid1") + + h.WaitAndAssertOnCallUsers(sid, uid1) +} diff --git a/smoketest/rotationhourly_test.go b/smoketest/rotationhourly_test.go new file mode 100644 index 0000000000..02f06d93dc --- /dev/null +++ b/smoketest/rotationhourly_test.go @@ -0,0 +1,77 @@ +package smoketest + 
+import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +func TestRotation_Hourly(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "uid1"}}, 'bob', 'joe'), + ({{uuid "uid2"}}, 'ben', 'frank'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "uid1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "uid2"}}, 'personal', 'SMS', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "uid1"}}, {{uuid "cm1"}}, 0), + ({{uuid "uid2"}}, {{uuid "cm2"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + + insert into schedules (id, name, time_zone) + values + ({{uuid "sched1"}}, 'default', 'America/Chicago'); + + insert into rotations (id, schedule_id, name, type, start_time, shift_length) + values + ({{uuid "rot1"}}, {{uuid "sched1"}}, 'default rotation', 'hourly', now(), 1); + + insert into rotation_participants (rotation_id, user_id, position) + values + ({{uuid "rot1"}}, {{uuid "uid1"}}, 0), + ({{uuid "rot1"}}, {{uuid "uid2"}}, 1); + + insert into escalation_policy_actions (escalation_policy_step_id, schedule_id) + values + ({{uuid "esid"}}, {{uuid "sched1"}}); + + insert into services (id, escalation_policy_id, name) values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) values + ({{uuid "sid"}}, 'testing'); + + ` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + h.Escalate(1, 0) + + d1.ExpectSMS("testing") + tw.WaitAndAssert() + + h.FastForward(time.Hour) + + sid := h.UUID("sid") + uid2 := h.UUID("uid2") + + h.WaitAndAssertOnCallUsers(sid, uid2) + +} diff --git 
a/smoketest/rotationweekly_test.go b/smoketest/rotationweekly_test.go new file mode 100644 index 0000000000..b9c7e5b9f0 --- /dev/null +++ b/smoketest/rotationweekly_test.go @@ -0,0 +1,73 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +func TestRotation_Weekly(t *testing.T) { + t.Parallel() + + const sql = ` + set timezone = 'America/Chicago'; + + insert into users (id, name, email) + values + ({{uuid "uid1"}}, 'bob', 'joe'), + ({{uuid "uid2"}}, 'ben', 'frank'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "uid1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "uid2"}}, 'personal', 'SMS', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "uid1"}}, {{uuid "cm1"}}, 0), + ({{uuid "uid2"}}, {{uuid "cm2"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + + insert into schedules (id, name, time_zone) + values + ({{uuid "sched1"}}, 'default', 'America/Chicago'); + + insert into rotations (id, schedule_id, name, type, start_time, shift_length) + values + ({{uuid "rot1"}}, {{uuid "sched1"}}, 'default rotation', 'weekly', now(), 1); + + insert into rotation_participants (rotation_id, user_id, position) + values + ({{uuid "rot1"}}, {{uuid "uid1"}}, 0), + ({{uuid "rot1"}}, {{uuid "uid2"}}, 1); + + insert into escalation_policy_actions (escalation_policy_step_id, schedule_id) + values + ({{uuid "esid"}}, {{uuid "sched1"}}); + + insert into services (id, escalation_policy_id, name) values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + ` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + sid := h.UUID("sid") + uid1 := h.UUID("uid1") + uid2 := h.UUID("uid2") + + h.WaitAndAssertOnCallUsers(sid, 
uid1) + + // Skip ahead by an extra day to step around DST changes. + // + // In the spring, one weekly shift would be short 1 hour. + // In the fall, one weekly shift will be 1 hour longer. + h.FastForward(8 * 24 * time.Hour) + + h.WaitAndAssertOnCallUsers(sid, uid2) +} diff --git a/smoketest/rotationwrap_test.go b/smoketest/rotationwrap_test.go new file mode 100644 index 0000000000..e476199f99 --- /dev/null +++ b/smoketest/rotationwrap_test.go @@ -0,0 +1,74 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestRotation_Wrap checks that rotations wrap & repeat +func TestRotation_Wrap(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name, email) + values + ({{uuid "uid1"}}, 'bob', 'joe'), + ({{uuid "uid2"}}, 'ben', 'frank'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "uid1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "uid2"}}, 'personal', 'SMS', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "uid1"}}, {{uuid "cm1"}}, 0), + ({{uuid "uid2"}}, {{uuid "cm2"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + + insert into schedules (id, name, time_zone) + values + ({{uuid "sched1"}}, 'default', 'America/Chicago'); + + insert into rotations (id, schedule_id, name, type, start_time, shift_length) + values + ({{uuid "rot1"}}, {{uuid "sched1"}}, 'default rotation', 'hourly', now() - '4 hours'::interval + '10 minute'::interval, 2); + + insert into rotation_participants (rotation_id, user_id, position) + values + ({{uuid "rot1"}}, {{uuid "uid1"}}, 0), + ({{uuid "rot1"}}, {{uuid "uid2"}}, 1); + + insert into escalation_policy_actions (escalation_policy_step_id, schedule_id) + 
values + ({{uuid "esid"}}, {{uuid "sched1"}}); + + insert into services (id, escalation_policy_id, name) values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) values + ({{uuid "sid"}}, 'testing'); + + ` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + // with an hourly shift_length of 2, and 2 users; 3hr59min should be user 2, a minute later should be user 1 + sid := h.UUID("sid") + uid1 := h.UUID("uid1") + uid2 := h.UUID("uid2") + + h.WaitAndAssertOnCallUsers(sid, uid2) + + h.FastForward(20 * time.Minute) + + h.WaitAndAssertOnCallUsers(sid, uid1) + +} diff --git a/smoketest/schedulerule_test.go b/smoketest/schedulerule_test.go new file mode 100644 index 0000000000..09fa5315be --- /dev/null +++ b/smoketest/schedulerule_test.go @@ -0,0 +1,77 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestScheduleRule performs the following checks: +// - A schedule rule "shift" can end with a past shift in the DB (bug found in dev) +// - Schedule rule time constraints are evaluated correctly +func TestScheduleRule(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "u1"}}, 'bob', 'joe'), + ({{uuid "u2"}}, 'ben', 'josh'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "u1"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "u2"}}, 'personal', 'SMS', {{phone "2"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "u1"}}, {{uuid "cm1"}}, 0), + ({{uuid "u2"}}, {{uuid "cm2"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + + insert into schedules (id, name, description, time_zone) + values + ({{uuid "sched"}}, 
'test', 'test', 'America/Chicago'); + + insert into schedule_rules (schedule_id, start_time, end_time, tgt_user_id) + values + ({{uuid "sched"}}, cast((now()-'5 minutes'::interval) at time zone 'America/Chicago' as time without time zone), cast((now()+'5 minutes'::interval) at time zone 'America/Chicago' as time without time zone), {{uuid "u1"}}), + ({{uuid "sched"}}, cast((now()+'5 minutes'::interval) at time zone 'America/Chicago' as time without time zone), cast((now()+'15 minutes'::interval) at time zone 'America/Chicago' as time without time zone), {{uuid "u2"}}); + + insert into escalation_policy_actions (escalation_policy_step_id, schedule_id) + values + ({{uuid "esid"}}, {{uuid "sched"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + + insert into schedule_on_call_users (schedule_id, start_time, end_time, user_id) + values + ({{uuid "sched"}}, now()-'2 hours'::interval, now()-'1 hour'::interval, {{uuid "u1"}}); + +` + h := harness.NewHarness(t, sql, "npcycle-indexes") + defer h.Close() + + sid := h.UUID("sid") + u1 := h.UUID("u1") + u2 := h.UUID("u2") + + h.WaitAndAssertOnCallUsers(sid, u1) + + h.FastForward(10 * time.Minute) + + h.WaitAndAssertOnCallUsers(sid, u2) +} diff --git a/smoketest/simplenotification_india_test.go b/smoketest/simplenotification_india_test.go new file mode 100644 index 0000000000..80ca10d094 --- /dev/null +++ b/smoketest/simplenotification_india_test.go @@ -0,0 +1,68 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestSimpleNotifications_India tests that SMS and Voice goes out for +// 1 alert -> service -> esc -> step -> user. 2 rules (1 of each) immediately. +// +// Currently, country code '+222' is used as a negative test. If we support +// 222 in the future, this test will need to be updated. 
+func TestSimpleNotifications_India(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phoneCC "+91" "1"}}), + ({{uuid "cm2"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phoneCC "+91" "1"}}), + ({{uuid "cm3"}}, {{uuid "user"}}, 'personal2', 'SMS', {{phoneCC "+222" "1"}}), + ({{uuid "cm4"}}, {{uuid "user"}}, 'personal2', 'VOICE', {{phoneCC "+222" "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm2"}}, 0), + ({{uuid "user"}}, {{uuid "cm3"}}, 0), + ({{uuid "user"}}, {{uuid "cm4"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewStoppedHarness(t, sql, "ids-to-uuids") + defer h.Close() + + // We are doing negative testing in that we expect the invalid country-codes + // to be rejected before being passed to Twilio. 
+ h.IgnoreErrorsWith("send notification:") + h.IgnoreErrorsWith("all notification senders failed") + + h.Start() + + d1 := h.Twilio().Device(h.PhoneCC("+91", "1")) + + d1.ExpectSMS("testing") + d1.ExpectVoice("testing") + +} diff --git a/smoketest/simplenotification_test.go b/smoketest/simplenotification_test.go new file mode 100644 index 0000000000..597373f3d8 --- /dev/null +++ b/smoketest/simplenotification_test.go @@ -0,0 +1,54 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestSimpleNotifications tests that SMS and Voice goes out for +// 1 alert -> service -> esc -> step -> user. 2 rules (1 of each) immediately. +func TestSimpleNotifications(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm2"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + d1 := h.Twilio().Device(h.Phone("1")) + + d1.ExpectSMS("testing") + d1.ExpectVoice("testing") + +} diff --git a/smoketest/slackaddtoepstep_test.go 
b/smoketest/slackaddtoepstep_test.go new file mode 100644 index 0000000000..23926d74f3 --- /dev/null +++ b/smoketest/slackaddtoepstep_test.go @@ -0,0 +1,57 @@ +package smoketest + +import ( + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestSlackAddToEPStep tests that slack channels can be added to an EPStep. +func TestSlackAddToEPStep(t *testing.T) { + t.Parallel() + + sql := ` + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); +` + h := harness.NewHarness(t, sql, "slack-user-link") + defer h.Close() + + doQL := func(t *testing.T, query string) { + g := h.GraphQLQuery2(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + + t.Log("Response:", string(g.Data)) + } + + channel := h.Slack().Channel("test") + + doQL(t, fmt.Sprintf(` + mutation { + createEscalationPolicyStep(input:{ + escalationPolicyID: "%s", + delayMinutes: 5, + targets: [{ + id: "%s", + type: slackChannel, + }], + }){ + id + } + } + `, h.UUID("eid"), channel.ID())) + + channel.ExpectMessage("testing") + h.CreateAlert(h.UUID("sid"), "testing") + +} diff --git a/smoketest/slackchannels_test.go b/smoketest/slackchannels_test.go new file mode 100644 index 0000000000..2865f35437 --- /dev/null +++ b/smoketest/slackchannels_test.go @@ -0,0 +1,65 @@ +package smoketest + +import ( + "encoding/json" + "github.com/target/goalert/smoketest/harness" + "sort" + "testing" +) + +// TestSlackChannels tests that slack channels are returned for configured users. 
+func TestSlackChannels(t *testing.T) { + t.Parallel() + + h := harness.NewHarness(t, "", "") + defer h.Close() + + ch := []harness.SlackChannel{ + h.Slack().Channel("foo"), + h.Slack().Channel("bar"), + h.Slack().Channel("baz"), + } + sort.Slice(ch, func(i, j int) bool { return ch[i].ID() < ch[j].ID() }) + + resp := h.GraphQLQuery2(`{slackChannels{nodes{id,name}}}`) + for _, err := range resp.Errors { + t.Error("graphql:", err.Message) + } + + var data struct { + SlackChannels struct { + Nodes []struct { + ID, Name string + } + } + } + err := json.Unmarshal(resp.Data, &data) + if err != nil { + t.Fatal("parse graphql response:", err) + } + channels := data.SlackChannels.Nodes + sort.Slice(channels, func(i, j int) bool { return channels[i].ID < channels[j].ID }) + + if len(channels) > len(ch) { + for _, n := range channels[len(ch)-1:] { + t.Errorf("got extra channel: ID=%s, Name=%s", n.ID, n.Name) + } + channels = channels[:len(ch)] + } + if len(channels) < len(ch) { + for _, c := range ch { + t.Errorf("missing channel: ID=%s, Name=%s", c.ID(), c.Name()) + } + ch = ch[:len(channels)] + } + + for i, n := range channels { + c := ch[i] + if c.ID() != n.ID { + t.Errorf("channel[%d].ID: got %s; want %s", i, n.ID, c.ID()) + } + if c.Name() != n.Name { + t.Errorf("channel[%d].Name: got %s; want %s", i, n.Name, c.Name()) + } + } +} diff --git a/smoketest/slacknotification_test.go b/smoketest/slacknotification_test.go new file mode 100644 index 0000000000..19f06b0b6a --- /dev/null +++ b/smoketest/slacknotification_test.go @@ -0,0 +1,37 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestSlackNotification tests that slack channels are returned for configured users. 
+func TestSlackNotification(t *testing.T) { + t.Parallel() + + sql := ` + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + + insert into notification_channels (id, type, name, value) + values + ({{uuid "chan"}}, 'SLACK', '#test', {{slackChannelID "test"}}); + + insert into escalation_policy_actions (escalation_policy_step_id, channel_id) + values + ({{uuid "esid"}}, {{uuid "chan"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); +` + h := harness.NewHarness(t, sql, "slack-user-link") + defer h.Close() + + h.Slack().Channel("test").ExpectMessage("testing") + h.CreateAlert(h.UUID("sid"), "testing") +} diff --git a/smoketest/statusupdates_test.go b/smoketest/statusupdates_test.go new file mode 100644 index 0000000000..c8991320d1 --- /dev/null +++ b/smoketest/statusupdates_test.go @@ -0,0 +1,95 @@ +package smoketest + +import ( + "bytes" + "github.com/target/goalert/smoketest/harness" + "net/http" + "net/url" + "testing" + "time" +) + +// TestStatusUpdates checks basic functionality of status updates: +// +// - If alert_status_log_contact_method_id isnull, no notifications are sent +// - When alert_status_log_contact_method_id is set, old notifications are NOT sent +// - Status changes, when/after alert_status_log_contact_method_id is set, are sent. 
+func TestStatusUpdates(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email, role) + values + ({{uuid "user"}}, 'bob', 'joe@test.com', 'admin'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + update users set alert_status_log_contact_method_id = {{uuid "cm1"}} + where id = {{uuid "user"}}; + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into integration_keys (id, service_id, type, name) + values + ({{uuid "int1"}}, {{uuid "sid"}}, 'generic', 'test'); + + insert into alerts (service_id, source, description) + values + ({{uuid "sid"}}, 'manual', 'first alert'), + ({{uuid "sid"}}, 'manual', 'second alert'); + +` + h := harness.NewHarness(t, sql, "alert-status-updates") + defer h.Close() + + doClose := func(summary string) { + u := h.URL() + "/v1/api/alerts?key=" + h.UUID("int1") + v := make(url.Values) + v.Set("summary", summary) + v.Set("action", "close") + resp, err := http.Post(u, "application/x-www-form-urlencoded", bytes.NewBufferString(v.Encode())) + if err != nil { + t.Fatal("post to generic endpoint failed:", err) + } else if resp.StatusCode/100 != 2 { + t.Error("non-2xx response:", resp.Status) + } + resp.Body.Close() + } + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + d1.ExpectSMS("first alert") + d1.ExpectSMS("second alert") + tw.WaitAndAssert() + + doClose("first alert") + + h.Delay(15 * 
time.Second) // ensure no additional notifications sent + tw.WaitAndAssert() + + doClose("second alert") + + // expect (1) status notification + d1.ExpectSMS("closed") + h.Delay(15 * time.Second) // ensure no additional notifications sent + + tw.WaitAndAssert() +} diff --git a/smoketest/systemlimits_test.go b/smoketest/systemlimits_test.go new file mode 100644 index 0000000000..f902c841a3 --- /dev/null +++ b/smoketest/systemlimits_test.go @@ -0,0 +1,407 @@ +package smoketest + +import ( + "encoding/json" + "fmt" + "github.com/target/goalert/limit" + "github.com/target/goalert/smoketest/harness" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +// TestSystemLimits tests that limits are enforced if configured. +func TestSystemLimits(t *testing.T) { + t.Parallel() + + const sql = ` + insert into users (id, name) + values + ({{uuid "cm_user"}}, 'CM User'), + ({{uuid "nr_user"}}, 'NR User'), + ({{uuid "ep_act_user1"}}, 'Step 1'), + ({{uuid "ep_act_user2"}}, 'Step 2'), + ({{uuid "ep_act_user3"}}, 'Step 3'), + ({{uuid "ep_act_user4"}}, 'Step 4'), + ({{uuid "part_user"}}, 'Part User'), + ({{uuid "rule_user"}}, 'Sched Rule User'), + ({{uuid "tgt_user1"}}, 'Target 1'), + ({{uuid "tgt_user2"}}, 'Target 2'), + ({{uuid "tgt_user3"}}, 'Target 3'), + ({{uuid "tgt_user4"}}, 'Target 4'); + + insert into schedules (id, name, time_zone) + values + ({{uuid "rule_sched"}}, 'Rule Test', 'UTC'), + ({{uuid "tgt_sched"}}, 'Target Test', 'UTC'); + + insert into rotations (id, name, type, time_zone) + values + ({{uuid "part_rot"}}, 'Part Rotation', 'daily', 'UTC'); + + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "nr_cm"}}, {{uuid "nr_user"}}, 'Test', 'SMS', {{phone "nr"}}); + + insert into escalation_policies (id, name) + values + ({{uuid "unack_ep1"}}, 'Unack Test 1'), + ({{uuid "unack_ep2"}}, 'Unack Test 2'), + ({{uuid "int_key_ep"}}, 'Int Key Test'), + ({{uuid "hb_ep"}}, 'Heartbeat Test'), + ({{uuid "step_ep"}}, 'Step Test'), + 
({{uuid "act_ep"}}, 'Action Test'), + ({{uuid "act_ep2"}}, 'Action Test 2'); + + insert into escalation_policy_steps (id, escalation_policy_id, delay) + values + ({{uuid "act_ep_step"}}, {{uuid "act_ep"}}, 15), + ({{uuid "act_ep_step2"}}, {{uuid "act_ep2"}}, 15); + + insert into services (id, name, escalation_policy_id) + values + ({{uuid "int_key_svc"}}, 'Int Key Test', {{uuid "int_key_ep"}}), + ({{uuid "hb_svc"}}, 'Heartbeat Test', {{uuid "hb_ep"}}), + ({{uuid "unack_svc1"}}, 'Unack Test 1', {{uuid "unack_ep1"}}), + ({{uuid "unack_svc2"}}, 'Unack Test 2', {{uuid "unack_ep2"}}); +` + + h := harness.NewHarness(t, sql, "limit-configuration") + defer h.Close() + + type idParser func(m map[string]interface{}) (string, bool) + + var getID idParser + getID = func(m map[string]interface{}) (string, bool) { + if id, ok := m["id"].(string); ok { + return id, true + } + if id, ok := m["id"].(float64); ok { + return strconv.Itoa(int(id)), true + } + for _, v := range m { + if vm, ok := v.(map[string]interface{}); ok { + id, ok := getID(vm) + if ok { + return id, true + } + } + } + return "", false + } + + doQLErr := func(t *testing.T, query string, getID idParser) (string, string) { + g := h.GraphQLQueryT(t, query, "/v1/graphql") + errs := len(g.Errors) + if errs > 1 { + for _, err := range g.Errors { + t.Logf(err.Message) + } + t.Fatalf("got %d errors; want 0 or 1", len(g.Errors)) + } + if len(g.Errors) == 0 { + + var m map[string]interface{} + + err := json.Unmarshal(g.Data, &m) + if err != nil { + t.Fatalf("got err='%s'; want nil", err.Error()) + } + id, _ := getID(m) + return id, "" + } + + return "", g.Errors[0].Message + } + + doTest := func(limitID limit.ID, expErrMsg string, addQuery func(int) string, delQuery func(int, string) string, parseID idParser) { + if parseID == nil { + parseID = getID + } + t.Run(string(limitID), func(t *testing.T) { + /* + Sequence: + 1. create 3 + 2. set limit to 2 + 3. create (should fail) + 4. delete x2 + 5. create (should work) + 6. 
create (should fail) + 7. set limit to -1 + 8. create (should work) + */ + noErr := func(id, res string) string { + t.Helper() + if res == "" { + return id + } + t.Fatalf("got err='%s'; want nil", res) + panic("test did not abort") + } + mustErr := func(id, res string) { + t.Helper() + if !strings.Contains(res, expErrMsg) { + t.Fatalf("err='%s'; want substring '%s'", res, expErrMsg) + } + } + setLimit := func(max int) { + t.Helper() + noErr(doQLErr(t, fmt.Sprintf(`mutation{updateConfigLimit(input:{id: %s, max: %d}){id}}`, limitID, max), parseID)) + } + ids := []string{ // create 3 + noErr(doQLErr(t, addQuery(1), parseID)), + noErr(doQLErr(t, addQuery(2), parseID)), + noErr(doQLErr(t, addQuery(3), parseID)), + } + setLimit(2) // set limit to 2 + mustErr(doQLErr(t, addQuery(4), parseID)) // create should fail + noErr(doQLErr(t, delQuery(2, ids[2]), parseID)) // delete 2 + noErr(doQLErr(t, delQuery(1, ids[1]), parseID)) + + noErr(doQLErr(t, addQuery(2), parseID)) // should be able to create 1 more + mustErr(doQLErr(t, addQuery(3), parseID)) // but only one + + setLimit(-1) + + noErr(doQLErr(t, addQuery(3), parseID)) // no more limit + }) + } + + var n int + name := func() string { + n++ + return fmt.Sprintf("Thing %d", n) + } + + doTest( + limit.ContactMethodsPerUser, + "contact methods", + func(int) string { + return fmt.Sprintf(`mutation{createContactMethod(input:{type: SMS, name: "%s", value: "%s", user_id: "%s"}){id}}`, name(), h.Phone(""), h.UUID("cm_user")) + }, + func(_ int, id string) string { + return fmt.Sprintf(`mutation{deleteContactMethod(input:{id: "%s"}){id: deleted_id}}`, id) + }, + nil, + ) + + nrDelay := 0 + doTest( + limit.NotificationRulesPerUser, + "notification rules", + func(int) string { + nrDelay++ + return fmt.Sprintf(`mutation{createNotificationRule(input:{contact_method_id: "%s", delay_minutes: %d, user_id: "%s"}){id}}`, h.UUID("nr_cm"), nrDelay, h.UUID("nr_user")) + }, + func(_ int, id string) string { + return 
fmt.Sprintf(`mutation{deleteNotificationRule(input:{id: "%s"}){id: deleted_id}}`, id) + }, + nil, + ) + + doTest( + limit.EPStepsPerPolicy, + "steps", + func(int) string { + return fmt.Sprintf(`mutation{createOrUpdateEscalationPolicyStep(input:{escalation_policy_id: "%s", delay_minutes: 10, user_ids: [], schedule_ids: []}){escalation_policy_step{id}}}`, h.UUID("step_ep")) + }, + func(_ int, id string) string { + return fmt.Sprintf(`mutation{deleteEscalationPolicyStep(input:{id:"%s"}){id: deleted_id}}`, id) + }, + nil, + ) + + actUsersList := []string{h.UUID("ep_act_user1"), h.UUID("ep_act_user2"), h.UUID("ep_act_user3"), h.UUID("ep_act_user4")} + actUsers := func(n int) string { + return fmt.Sprintf(`"%s"`, strings.Join(actUsersList[:n], `","`)) + } + doTest( + limit.EPActionsPerStep, + "actions", + func(n int) string { + return fmt.Sprintf(`mutation{createOrUpdateEscalationPolicyStep(input:{id:"%s", escalation_policy_id: "%s", delay_minutes: 15, schedule_ids: [], user_ids: [%s]}){escalation_policy_step{id}}}`, + h.UUID("act_ep_step"), + h.UUID("act_ep"), + actUsers(n), + ) + }, + func(n int, _ string) string { + return fmt.Sprintf(`mutation{createOrUpdateEscalationPolicyStep(input:{id:"%s", escalation_policy_id: "%s", delay_minutes: 15, schedule_ids: [], user_ids: [%s]}){escalation_policy_step{id}}}`, + h.UUID("act_ep_step"), + h.UUID("act_ep"), + actUsers(n), + ) + }, + nil, + ) + doTest( + limit.EPActionsPerStep, + "actions", + func(n int) string { + return fmt.Sprintf(`mutation{addEscalationPolicyStepTarget(input:{step_id:"%s", target_id: "%s", target_type: user}){id: target_id}}`, + h.UUID("act_ep_step2"), + actUsersList[n-1], + ) + }, + func(_ int, id string) string { + return fmt.Sprintf(`mutation{deleteEscalationPolicyStepTarget(input:{step_id:"%s", target_id: "%s", target_type: user}){id: target_id}}`, + h.UUID("act_ep_step2"), + id, + ) + }, + nil, + ) + + doTest( + limit.ParticipantsPerRotation, + "participants", + func(int) string { + return 
fmt.Sprintf(`mutation{addRotationParticipant(input:{user_id: "%s", rotation_id: "%s"}){id}}`, h.UUID("part_user"), h.UUID("part_rot")) + }, + func(_ int, id string) string { + return fmt.Sprintf(`mutation{deleteRotationParticipant(input:{id: "%s"}){id: deleted_id}}`, id) + }, + nil, + ) + + doTest( + limit.IntegrationKeysPerService, + "integration keys", + func(int) string { + return fmt.Sprintf(`mutation{createIntegrationKey(input:{service_id: "%s", type: generic, name:"%s"}){id}}`, h.UUID("int_key_svc"), name()) + }, + func(_ int, id string) string { + return fmt.Sprintf(`mutation{deleteIntegrationKey(input:{id: "%s"}){id: deleted_id}}`, id) + }, + nil, + ) + + doTest( + limit.HeartbeatMonitorsPerService, + "heartbeat monitors", + func(int) string { + return fmt.Sprintf(`mutation{createAll(input:{heartbeat_monitors: [{interval_minutes:5,service_id: "%s", name: "%s"}]}){heartbeat_monitors {id}}}`, h.UUID("hb_svc"), name()) + }, + func(_ int, id string) string { + return fmt.Sprintf(`mutation{deleteHeartbeatMonitor(input:{id: "%s"}){id: deleted_id}}`, id) + }, + func(m map[string]interface{}) (string, bool) { + c, ok := m["createAll"].(map[string]interface{}) + if !ok { + return "", false + } + asn := c["heartbeat_monitors"].([]interface{}) + return asn[0].(map[string]interface{})["id"].(string), true + }, + ) + + // schedule tests (need custom parser) + s := time.Date(2005, 0, 0, 0, 0, 0, 0, time.UTC) + startTime := func() string { + s = s.Add(time.Minute) + return s.Format("15:04") + } + doTest( + limit.RulesPerSchedule, + "rules", + func(int) string { + return fmt.Sprintf(`mutation{createScheduleRule(input:{ + schedule_id: "%s", + target_id: "%s", + target_type: user, + sunday: false,monday: false,tuesday: false,wednesday: false,thursday: false,friday: false,saturday: false, + start: "%s", + end: "%s" + }){assignments(start_time: "%s", end_time: "%s"){rules{id, start}}}}`, + h.UUID("rule_sched"), + h.UUID("rule_user"), + startTime(), + startTime(), + 
s.Format(time.RFC3339), + s.Format(time.RFC3339), + ) + }, + func(_ int, id string) string { + return fmt.Sprintf(`mutation{deleteScheduleRule(input:{id: "%s"}){id}}`, id) + }, + func(m map[string]interface{}) (string, bool) { + sched, ok := m["createScheduleRule"].(map[string]interface{}) + if !ok { + return "", false + } + asn := sched["assignments"].([]interface{}) + rules := asn[0].(map[string]interface{})["rules"].([]interface{}) + sort.Slice(rules, func(i, j int) bool { + return rules[i].(map[string]interface{})["start"].(string) < rules[j].(map[string]interface{})["start"].(string) + }) + return rules[len(rules)-1].(map[string]interface{})["id"].(string), true + }, + ) + + tgtUsersList := []string{h.UUID("tgt_user1"), h.UUID("tgt_user2"), h.UUID("tgt_user3"), h.UUID("tgt_user4")} + doTest( + limit.TargetsPerSchedule, + "targets", + func(n int) string { + return fmt.Sprintf(`mutation{createScheduleRule(input:{ + schedule_id: "%s", + target_id: "%s", + target_type: user, + sunday: false,monday: false,tuesday: false,wednesday: false,thursday: false,friday: false,saturday: false, + start: "%s", + end: "%s" + }){assignments(start_time: "%s", end_time: "%s"){rules{id, start}}}}`, + h.UUID("tgt_sched"), + tgtUsersList[n-1], + startTime(), + startTime(), + s.Format(time.RFC3339), + s.Format(time.RFC3339), + ) + }, + func(_ int, id string) string { + return fmt.Sprintf(`mutation{deleteScheduleRule(input:{id: "%s"}){id}}`, id) + }, + func(m map[string]interface{}) (string, bool) { + sched, ok := m["createScheduleRule"].(map[string]interface{}) + if !ok { + return "", false + } + asn := sched["assignments"].([]interface{}) + var rules []interface{} + for _, a := range asn { + rules = append(rules, a.(map[string]interface{})["rules"].([]interface{})...) 
+ } + sort.Slice(rules, func(i, j int) bool { + return rules[i].(map[string]interface{})["start"].(string) < rules[j].(map[string]interface{})["start"].(string) + }) + return rules[len(rules)-1].(map[string]interface{})["id"].(string), true + }, + ) + + doTest( + limit.UnackedAlertsPerService, + "unacknowledged alerts", + func(int) string { + return fmt.Sprintf(`mutation{createAlert(input:{service_id: "%s", description: "%s"}){id: _id}}`, h.UUID("unack_svc1"), name()) + }, + func(_ int, id string) string { + return fmt.Sprintf(`mutation{updateAlertStatus(input:{id:%s, status: acknowledged}){id}}`, id) + }, + nil, + ) + doTest( + limit.UnackedAlertsPerService, + "unacknowledged alerts", + func(int) string { + return fmt.Sprintf(`mutation{createAlert(input:{service_id: "%s", description: "%s"}){id: _id}}`, h.UUID("unack_svc2"), name()) + }, + func(_ int, id string) string { + return fmt.Sprintf(`mutation{updateAlertStatus(input:{id:%s, status: closed}){id}}`, id) + }, + nil, + ) + +} diff --git a/smoketest/twilioenablebysms_test.go b/smoketest/twilioenablebysms_test.go new file mode 100644 index 0000000000..21ed34ebc4 --- /dev/null +++ b/smoketest/twilioenablebysms_test.go @@ -0,0 +1,77 @@ +package smoketest + +import ( + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestTwilioEnableBySMS checks that all contact methods with the same value and of the same user are enabled when the user responds via SMS with the correct code. 
+func TestTwilioEnableBySMS(t *testing.T) { + t.Parallel() + + sqlQuery := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value, disabled) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}, true), + ({{uuid "cm2"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}, true); + insert into user_verification_codes (id, user_id, contact_method_value, code, expires_at) + values + ({{uuid "id"}}, {{uuid "user"}}, {{phone "1"}}, 123456, now() + '15 minutes'::interval) +` + h := harness.NewHarness(t, sqlQuery, "add-verification-code") + defer h.Close() + + doQL := func(query string) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + } + + cm1 := h.UUID("cm1") + cm2 := h.UUID("cm2") + + doQL(fmt.Sprintf(` + mutation { + verifyContactMethod(input: { + contact_method_id: "%s", + verification_code: %d, + }) { + contact_method_ids + } + } + `, cm1, 123456)) + + // All contact methods that have same value and of the same user should be enabled now. 
+ doQL(fmt.Sprintf(` + mutation { + sendContactMethodTest(input:{ + contact_method_id: "%s", + }){ + id + } + } + `, cm1)) + + d1 := h.Twilio().Device(h.Phone("1")) + d1.ExpectSMS("test") + + doQL(fmt.Sprintf(` + mutation { + sendContactMethodTest(input:{ + contact_method_id: "%s", + }){ + id + } + } + `, cm2)) + + d1.ExpectVoice("test") +} diff --git a/smoketest/twilioenablebyvoice_test.go b/smoketest/twilioenablebyvoice_test.go new file mode 100644 index 0000000000..1dfdaf2aca --- /dev/null +++ b/smoketest/twilioenablebyvoice_test.go @@ -0,0 +1,77 @@ +package smoketest + +import ( + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestTwilioEnablebyVoice checks that all contact methods with the same value and of the same user are enabled when the user responds via Voice with the correct code. +func TestTwilioEnablebyVoice(t *testing.T) { + t.Parallel() + + sqlQuery := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value, disabled) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}, true), + ({{uuid "cm2"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}, true); + insert into user_verification_codes (id, user_id, contact_method_value, code, expires_at) + values + ({{uuid "id"}}, {{uuid "user"}}, {{phone "1"}}, 123456, now() + '15 minutes'::interval) +` + h := harness.NewHarness(t, sqlQuery, "add-verification-code") + defer h.Close() + + doQL := func(query string) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + } + + cm1 := h.UUID("cm1") + cm2 := h.UUID("cm2") + + doQL(fmt.Sprintf(` + mutation { + verifyContactMethod(input: { + contact_method_id: "%s", + verification_code: %d, + }) { + contact_method_ids + } + } + `, cm2, 123456)) + + // All contact methods that have same value 
and of the same user should be enabled now. + doQL(fmt.Sprintf(` + mutation { + sendContactMethodTest(input:{ + contact_method_id: "%s", + }){ + id + } + } + `, cm1)) + d1 := h.Twilio().Device(h.Phone("1")) + d1.ExpectSMS("test") + h.Twilio().WaitAndAssert() + + doQL(fmt.Sprintf(` + mutation { + sendContactMethodTest(input:{ + contact_method_id: "%s", + }){ + id + } + } + `, cm2)) + + d1.ExpectVoice("test") +} diff --git a/smoketest/twiliosmsack_test.go b/smoketest/twiliosmsack_test.go new file mode 100644 index 0000000000..de844fd244 --- /dev/null +++ b/smoketest/twiliosmsack_test.go @@ -0,0 +1,59 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestTwilioSMSAck checks that an SMS ack message is processed. +func TestTwilioSMSAck(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email, role) + values + ({{uuid "user"}}, 'bob', 'joe', 'user'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm1"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (id, service_id, description) + values + (198, {{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + d1.ExpectSMS("testing").ThenReply("ack198") + 
d1.ExpectSMS("acknowledged") + tw.WaitAndAssert() + + h.FastForward(time.Minute) + + h.Delay(time.Second * 15) + // no more messages +} diff --git a/smoketest/twiliosmsclose_test.go b/smoketest/twiliosmsclose_test.go new file mode 100644 index 0000000000..752f172b1a --- /dev/null +++ b/smoketest/twiliosmsclose_test.go @@ -0,0 +1,59 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestTwilioSMSClose checks that an SMS close message is processed. +func TestTwilioSMSClose(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email, role) + values + ({{uuid "user"}}, 'bob', 'joe', 'user'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm1"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + d1.ExpectSMS("testing").ThenReply("close 1") + d1.ExpectSMS("closed") + tw.WaitAndAssert() + + h.FastForward(time.Minute) + + h.Delay(time.Second * 15) + // no more messages +} diff --git a/smoketest/twiliosmsfailure_test.go b/smoketest/twiliosmsfailure_test.go new file mode 100644 index 0000000000..e5bc9700ec --- /dev/null +++ 
b/smoketest/twiliosmsfailure_test.go @@ -0,0 +1,53 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestTwilioSMSFailure checks that an SMS delivery failure is retried. +func TestTwilioSMSFailure(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + d1.ExpectSMS("testing").RespondWithFailed() + + // should not retry when provider says it failed + h.Trigger() +} diff --git a/smoketest/twiliosmsreplycode_test.go b/smoketest/twiliosmsreplycode_test.go new file mode 100644 index 0000000000..103ade9cc8 --- /dev/null +++ b/smoketest/twiliosmsreplycode_test.go @@ -0,0 +1,72 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestTwilioSMSReplyCode checks that reply codes work properly. 
+func TestTwilioSMSReplyCode(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email, role) + values + ({{uuid "user"}}, 'bob', 'joe', 'user'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm1"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service');; + +` + + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + h.CreateAlert(h.UUID("sid"), "test1") + d1.ExpectSMS("test1", "1c", "1a").ThenReply("1a") + d1.ExpectSMS("Acknowledged", "#1") + + tw.WaitAndAssert() + + h.CreateAlert(h.UUID("sid"), "test2") + d1.ExpectSMS("test2", "2c", "2a").ThenReply("1a") // ack again + d1.ExpectSMS("already", "ack").ThenReply("'1c'") // then close + d1.ExpectSMS("Closed", "#1") + + tw.WaitAndAssert() + + h.CreateAlert(h.UUID("sid"), "test3") + d1.ExpectSMS("test3", "1c", "1a").ThenReply("1 a") // 1 was re-used for alert #3 + + tw.WaitAndAssert() + + d1.ExpectSMS("Ack", "#3") + + tw.WaitAndAssert() + + h.CreateAlert(h.UUID("sid"), "test4") + d1.ExpectSMS("test4", "3c", "3a").ThenReply("close 4") // old method 'close alertID' still works + d1.ExpectSMS("Closed", "#4") +} diff --git a/smoketest/twiliosmsreplylast_test.go b/smoketest/twiliosmsreplylast_test.go new file mode 100644 index 0000000000..a16f44fe24 --- /dev/null +++ 
b/smoketest/twiliosmsreplylast_test.go @@ -0,0 +1,60 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestTwilioSMSReplyLast checks that an SMS reply message is processed with no number. +func TestTwilioSMSReplyLast(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email, role) + values + ({{uuid "user"}}, 'bob', 'joe', 'user'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm1"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (id, service_id, description) + values + (198, {{uuid "sid"}}, 'testing'); + +` + check := func(respondWith, expect string) { + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + d1.ExpectSMS("testing").ThenReply(respondWith) + d1.ExpectSMS(expect, "198") + tw.WaitAndAssert() + } + + check("ack", "acknowledged") + check("a", "acknowledged") + check("close", "closed") + check("c", "closed") +} diff --git a/smoketest/twiliosmsstop_test.go b/smoketest/twiliosmsstop_test.go new file mode 100644 index 0000000000..f13b1a1947 --- /dev/null +++ b/smoketest/twiliosmsstop_test.go @@ -0,0 +1,57 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestTwilioSMSStop checks 
that an SMS STOP message is processed. +func TestTwilioSMSStop(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm1"}}, 1), + ({{uuid "user"}}, {{uuid "cm2"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + h.Twilio().Device(h.Phone("1")).ExpectSMS("testing").ThenReply("stop") + h.Twilio().WaitAndAssert() + + h.FastForward(time.Minute) + + h.Delay(time.Second * 15) + // no more messages, it should have disabled both +} diff --git a/smoketest/twiliosmstrailingspace_test.go b/smoketest/twiliosmstrailingspace_test.go new file mode 100644 index 0000000000..37496de358 --- /dev/null +++ b/smoketest/twiliosmstrailingspace_test.go @@ -0,0 +1,53 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestTwilioSMSTrailingSpace checks that an SMS ack message is processed even with trailing spaces in response. 
+func TestTwilioSMSTrailingSpace(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email, role) + values + ({{uuid "user"}}, 'bob', 'joe', 'user'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (id, service_id, description) + values + (198, {{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + d1.ExpectSMS("testing").ThenReply("ack198 ") + tw.WaitAndAssert() + + d1.ExpectSMS("acknowledged") +} diff --git a/smoketest/twiliosmsverification_test.go b/smoketest/twiliosmsverification_test.go new file mode 100644 index 0000000000..4013946ed2 --- /dev/null +++ b/smoketest/twiliosmsverification_test.go @@ -0,0 +1,100 @@ +package smoketest + +import ( + "fmt" + "github.com/target/goalert/smoketest/harness" + "strings" + "testing" + "time" +) + +// TestTwilioSMSVerification checks that a verification SMS is processed. 
+func TestTwilioSMSVerification(t *testing.T) { + t.Parallel() + + sqlQuery := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value, disabled) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}, true), + ({{uuid "cm2"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}, true); + insert into user_notification_rules (id, user_id, delay_minutes, contact_method_id) + values + ({{uuid "nr1"}}, {{uuid "user"}}, 0, {{uuid "cm1"}}), + ({{uuid "nr2"}}, {{uuid "user"}}, 0, {{uuid "cm2"}}), + ({{uuid "nr3"}}, {{uuid "user"}}, 1, {{uuid "cm1"}}), + ({{uuid "nr4"}}, {{uuid "user"}}, 1, {{uuid "cm2"}}); + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); +` + h := harness.NewHarness(t, sqlQuery, "add-verification-code") + defer h.Close() + + doQL := func(query string) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + } + + cm1 := h.UUID("cm1") + + doQL(fmt.Sprintf(` + mutation { + sendContactMethodVerification(input:{ + contact_method_id: "%s", + }){ + id + } + } + `, cm1)) + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + msg := d1.ExpectSMS("verification") + tw.WaitAndAssert() // wait for code, and ensure no notifications went out + + code := strings.Map(func(r rune) rune { + if r >= '0' && r <= '9' { + return r + } + return -1 + }, msg.Body()) + + 
doQL(fmt.Sprintf(` + mutation { + verifyContactMethod(input:{ + contact_method_id: "%s", + verification_code: %s + }){ + contact_method_ids + } + } + `, cm1, code)) + + h.FastForward(time.Minute) + + // both CM's for the given number should be enabled + d1.ExpectSMS("testing") + d1.ExpectVoice("testing") +} diff --git a/smoketest/twiliotestsms_test.go b/smoketest/twiliotestsms_test.go new file mode 100644 index 0000000000..a0f758b249 --- /dev/null +++ b/smoketest/twiliotestsms_test.go @@ -0,0 +1,46 @@ +package smoketest + +import ( + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestTwilioSMS checks that a test SMS is processed. +func TestTwilioSMS(t *testing.T) { + t.Parallel() + + sqlQuery := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); +` + h := harness.NewHarness(t, sqlQuery, "add-verification-code") + defer h.Close() + + doQL := func(query string) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + } + cm1 := h.UUID("cm1") + + doQL(fmt.Sprintf(` + mutation { + sendContactMethodTest(input:{ + contact_method_id: "%s", + }){ + id + } + } + `, cm1)) + + h.Twilio().Device(h.Phone("1")).ExpectSMS("test") +} diff --git a/smoketest/twiliotestvoice_test.go b/smoketest/twiliotestvoice_test.go new file mode 100644 index 0000000000..2aedfd49af --- /dev/null +++ b/smoketest/twiliotestvoice_test.go @@ -0,0 +1,47 @@ +package smoketest + +import ( + "fmt" + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestTwilioVoice checks that a test voice call is processed. 
+func TestTwilioVoice(t *testing.T) { + t.Parallel() + + sqlQuery := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}); +` + h := harness.NewHarness(t, sqlQuery, "add-verification-code") + defer h.Close() + + doQL := func(query string) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + } + + cm1 := h.UUID("cm1") + + doQL(fmt.Sprintf(` + mutation { + sendContactMethodTest(input:{ + contact_method_id: "%s", + }){ + id + } + } + `, cm1)) + + h.Twilio().Device(h.Phone("1")).ExpectVoice("test") +} diff --git a/smoketest/twiliovoiceack_test.go b/smoketest/twiliovoiceack_test.go new file mode 100644 index 0000000000..e17be22cc1 --- /dev/null +++ b/smoketest/twiliovoiceack_test.go @@ -0,0 +1,58 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestTwilioVoiceAck checks that a voice call ack is processed. 
+func TestTwilioVoiceAck(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email, role) + values + ({{uuid "user"}}, 'bob', 'joe', 'user'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm1"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + d1.ExpectVoice("testing").ThenPress("4").ThenExpect("acknowledged") + tw.WaitAndAssert() + + h.FastForward(time.Minute) + + h.Delay(time.Second * 15) + // no more messages +} diff --git a/smoketest/twiliovoiceclose_test.go b/smoketest/twiliovoiceclose_test.go new file mode 100644 index 0000000000..07941b143a --- /dev/null +++ b/smoketest/twiliovoiceclose_test.go @@ -0,0 +1,58 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestTwilioVoiceClose checks that a voice call close is processed. 
+func TestTwilioVoiceClose(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email, role) + values + ({{uuid "user"}}, 'bob', 'joe', 'user'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm1"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + d1.ExpectVoice("testing").ThenPress("6").ThenExpect("closed") + tw.WaitAndAssert() + + h.FastForward(time.Minute) + + h.Delay(time.Second * 15) + // no more messages +} diff --git a/smoketest/twiliovoiceemptymessage_test.go b/smoketest/twiliovoiceemptymessage_test.go new file mode 100644 index 0000000000..454edc950f --- /dev/null +++ b/smoketest/twiliovoiceemptymessage_test.go @@ -0,0 +1,50 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestTwilioVoiceEmptyMessage checks that an appropriate voice call is made when alert has empty summary. 
+func TestTwilioVoiceEmptyMessage(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, source, summary, details) + values + ({{uuid "sid"}}, 'manual', '', ''); + +` + h := harness.NewHarness(t, sql, "alerts-split-summary-details") + defer h.Close() + + d1 := h.Twilio().Device(h.Phone("1")) + d1.ExpectVoice("No summary provided") + + h.Twilio().WaitAndAssert() +} diff --git a/smoketest/twiliovoicefailure_test.go b/smoketest/twiliovoicefailure_test.go new file mode 100644 index 0000000000..61bea3bb8e --- /dev/null +++ b/smoketest/twiliovoicefailure_test.go @@ -0,0 +1,51 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" +) + +// TestTwilioVoiceFailure checks that a voice call failure is retried. 
+func TestTwilioVoiceFailure(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + d1 := h.Twilio().Device(h.Phone("1")) + d1.ExpectVoice("testing").RespondWithFailed() + + // should not retry (failed calls are perm failure) + h.Trigger() +} diff --git a/smoketest/twiliovoicestop_test.go b/smoketest/twiliovoicestop_test.go new file mode 100644 index 0000000000..5176d21fef --- /dev/null +++ b/smoketest/twiliovoicestop_test.go @@ -0,0 +1,65 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestTwilioVoiceStop checks that a voice call STOP is processed. 
+func TestTwilioVoiceStop(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}), + ({{uuid "cm2"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + insert into user_notification_rules (user_id, contact_method_id, delay_minutes) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0), + ({{uuid "user"}}, {{uuid "cm1"}}, 1), + ({{uuid "user"}}, {{uuid "cm2"}}, 1); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + +` + h := harness.NewHarness(t, sql, "ids-to-uuids") + defer h.Close() + + d1 := h.Twilio().Device(h.Phone("1")) + + d1.ExpectVoice("testing"). + ThenPress("1"). + ThenExpect("unenrollment"). + ThenPress("3"). + ThenExpect("goodbye") + + // Should unenroll completely (no voice or SMS) + h.Twilio().WaitAndAssert() + + h.FastForward(time.Minute) + + h.Delay(time.Second * 15) + // no more messages, it should have disabled both +} diff --git a/smoketest/twiliovoiceverification_test.go b/smoketest/twiliovoiceverification_test.go new file mode 100644 index 0000000000..80ce5762db --- /dev/null +++ b/smoketest/twiliovoiceverification_test.go @@ -0,0 +1,100 @@ +package smoketest + +import ( + "fmt" + "github.com/target/goalert/smoketest/harness" + "strings" + "testing" + "time" +) + +// TestTwilioVoiceVerification checks that a verification voice call is processed. 
+func TestTwilioVoiceVerification(t *testing.T) { + t.Parallel() + + sqlQuery := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value, disabled) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}, true), + ({{uuid "cm2"}}, {{uuid "user"}}, 'personal', 'VOICE', {{phone "1"}}, true); + insert into user_notification_rules (id, user_id, delay_minutes, contact_method_id) + values + ({{uuid "nr1"}}, {{uuid "user"}}, 0, {{uuid "cm1"}}), + ({{uuid "nr2"}}, {{uuid "user"}}, 0, {{uuid "cm2"}}), + ({{uuid "nr3"}}, {{uuid "user"}}, 1, {{uuid "cm1"}}), + ({{uuid "nr4"}}, {{uuid "user"}}, 1, {{uuid "cm2"}}); + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); +` + h := harness.NewHarness(t, sqlQuery, "add-verification-code") + defer h.Close() + + doQL := func(query string) { + g := h.GraphQLQuery(query) + for _, err := range g.Errors { + t.Error("GraphQL Error:", err.Message) + } + if len(g.Errors) > 0 { + t.Fatal("errors returned from GraphQL") + } + } + + cm2 := h.UUID("cm2") + + doQL(fmt.Sprintf(` + mutation { + sendContactMethodVerification(input:{ + contact_method_id: "%s", + }){ + id + } + } + `, cm2)) + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + msg := d1.ExpectVoice("verification") + tw.WaitAndAssert() // wait for code, and ensure no notifications went out + + code := strings.Map(func(r rune) rune { + if r >= '0' && r <= '9' { + return r + } + return -1 + }, msg.Body()) + + 
doQL(fmt.Sprintf(` + mutation { + verifyContactMethod(input:{ + contact_method_id: "%s", + verification_code: %s + }){ + contact_method_ids + } + } + `, cm2, code)) + + h.FastForward(time.Minute) + + // both CM's for the given number should be enabled + d1.ExpectSMS("testing") + d1.ExpectVoice("testing") +} diff --git a/smoketest/upload.sh b/smoketest/upload.sh new file mode 100755 index 0000000000..fffb65699e --- /dev/null +++ b/smoketest/upload.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +FAILURES="" + +while read -r line +do + echo "$line" + if echo "$line" | grep -q "^--- FAIL: " + then + NAME=$(echo "$line" | awk '{print $3}') + FAILURES="$FAILURES $NAME" + fi +done + +if [ "$CI" == "drone" ] && [ -n "$FAILURES" ] +then + SERVER=https://build-image-service.dev.target.goalert.me + echo "" + for name in $FAILURES + do + fname="smoketest/smoketest_db_dump/$NAME.sql" + if [ -e "$fname" ] + then + FILE=$(curl -LksS "$SERVER/sql/" --data-binary "@$fname" -H "Authorization: Bearer $BUILD_IMAGE_SERVICE_TOKEN") + if [ -n "$FILE" ] + then + echo "DB DUMP: $name" + echo " $SERVER/$FILE" + echo "" + else + echo "Failed to upload: $name" + fi + fi + done + + exit 1 +fi diff --git a/smoketest/usernotificationcycles_test.go b/smoketest/usernotificationcycles_test.go new file mode 100644 index 0000000000..eb17c8fc76 --- /dev/null +++ b/smoketest/usernotificationcycles_test.go @@ -0,0 +1,70 @@ +package smoketest + +import ( + "github.com/target/goalert/smoketest/harness" + "testing" + "time" +) + +// TestUserNotificationCycles tests that the engine +// generates notifications for a notification policy based on the +// 'started_at' timestamp in the 'user_notification_cycles' table +func TestUserNotificationCycles(t *testing.T) { + t.Parallel() + + sql := ` + insert into users (id, name, email) + values + ({{uuid "user"}}, 'bob', 'joe'); + insert into user_contact_methods (id, user_id, name, type, value) + values + ({{uuid "cm1"}}, {{uuid "user"}}, 'personal', 'SMS', {{phone "1"}}); + + 
insert into user_notification_rules (user_id, contact_method_id, delay_minutes, created_at) + values + ({{uuid "user"}}, {{uuid "cm1"}}, 0, now()-'1 hour'::interval), + ({{uuid "user"}}, {{uuid "cm1"}}, 1, now()-'1 hour'::interval), + ({{uuid "user"}}, {{uuid "cm1"}}, 5, now()-'1 hour'::interval); + + insert into escalation_policies (id, name) + values + ({{uuid "eid"}}, 'esc policy'); + insert into escalation_policy_steps (id, escalation_policy_id) + values + ({{uuid "esid"}}, {{uuid "eid"}}); + insert into escalation_policy_actions (escalation_policy_step_id, user_id) + values + ({{uuid "esid"}}, {{uuid "user"}}); + + insert into services (id, escalation_policy_id, name) + values + ({{uuid "sid"}}, {{uuid "eid"}}, 'service'); + + insert into alerts (service_id, description) + values + ({{uuid "sid"}}, 'testing'); + + insert into notification_logs (id, alert_id, contact_method_id, process_timestamp, completed) + values + ({{uuid ""}}, 1, {{uuid "cm1"}}, now() - '119 seconds'::interval, true); + + insert into notification_policy_cycles (id, user_id, alert_id, started_at) + values + ({{uuid ""}}, {{uuid "user"}}, 1, now() - '120 seconds'::interval); +` + h := harness.NewHarness(t, sql, "ev3-remove-status-trigger") + defer h.Close() + + // 0-minute rule should not fire (already sent) + tw := h.Twilio() + d1 := tw.Device(h.Phone("1")) + + d1.ExpectSMS("testing") // 1 minute rule should fire (since we're behind) + h.Delay(15 * time.Second) + tw.WaitAndAssert() + + h.FastForward(5 * time.Minute) + + // 5-min rule should now fire + d1.ExpectSMS("testing") +} diff --git a/sqltrace/attributes.go b/sqltrace/attributes.go new file mode 100644 index 0000000000..7c09d19102 --- /dev/null +++ b/sqltrace/attributes.go @@ -0,0 +1,21 @@ +package sqltrace + +import ( + "net/url" + "strings" + + "go.opencensus.io/trace" +) + +func getConnAttributes(name string) ([]trace.Attribute, error) { + u, err := url.Parse(name) + if err != nil { + return nil, err + } + + return 
[]trace.Attribute{ + trace.StringAttribute("sql.user", u.User.Username()), + trace.StringAttribute("sql.db", strings.TrimPrefix(u.Path, "/")), + trace.StringAttribute("sql.host", u.Host), + }, nil +} diff --git a/sqltrace/conn.go b/sqltrace/conn.go new file mode 100644 index 0000000000..73bc3d6d38 --- /dev/null +++ b/sqltrace/conn.go @@ -0,0 +1,153 @@ +package sqltrace + +import ( + "context" + "database/sql/driver" + "fmt" + "strconv" + + "go.opencensus.io/trace" +) + +type _Conn struct { + conn driver.Conn + drv *_Driver + + span *trace.Span + + attrs []trace.Attribute +} + +var _ driver.Conn = &_Conn{} +var _ driver.ConnBeginTx = &_Conn{} +var _ driver.ConnPrepareContext = &_Conn{} +var _ driver.ExecerContext = &_Conn{} +var _ driver.QueryerContext = &_Conn{} + +func (c *_Conn) Prepare(query string) (driver.Stmt, error) { + return c.PrepareContext(context.Background(), query) +} +func (c *_Conn) PrepareContext(ctx context.Context, query string) (stmt driver.Stmt, err error) { + ctx, sp := c.startSpan(ctx, "SQL.Prepare") + defer sp.End() + c.annotateSpan(query, nil, sp) + + if cp, ok := c.conn.(driver.ConnPrepareContext); ok { + stmt, err = cp.PrepareContext(ctx, query) + } else { + stmt, err = c.conn.Prepare(query) + } + errSpan(err, sp) + if err != nil { + return nil, err + } + + return &_Stmt{ + query: query, + Stmt: stmt, + conn: c, + }, nil +} + +func (c *_Conn) startSpan(ctx context.Context, name string) (context.Context, *trace.Span) { + if c.span != nil { + return trace.StartSpanWithRemoteParent(ctx, name, c.span.SpanContext()) + } + + return trace.StartSpan(ctx, name) +} + +func (c *_Conn) Begin() (driver.Tx, error) { + return c.BeginTx(context.Background(), driver.TxOptions{}) +} + +func (c *_Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (tx driver.Tx, err error) { + ctx, sp := c.startSpan(ctx, "SQL.Tx") + sp.AddAttributes( + trace.BoolAttribute("sql.tx.readOnly", opts.ReadOnly), + trace.Int64Attribute("sql.tx.isolation", 
int64(opts.Isolation)), + ) + + if cx, ok := c.conn.(driver.ConnBeginTx); ok { + tx, err = cx.BeginTx(ctx, opts) + } else { + //lint:ignore SA1019 We have to fallback if the wrapped driver doesn't implement ConnBeginTx. + tx, err = c.conn.Begin() + } + errSpan(err, sp) + if err != nil { + sp.End() + return nil, err + } + c.span = sp + return &_Tx{conn: c, tx: tx, ctx: ctx}, nil +} +func (c *_Conn) Close() error { + return c.conn.Close() +} + +func (c *_Conn) annotateSpan(query string, args []driver.NamedValue, sp *trace.Span) { + sp.AddAttributes(c.attrs...) + if c.drv.includeQuery { + sp.AddAttributes( + trace.StringAttribute("sql.query", query), + ) + } + if c.drv.includeArgs && len(args) > 0 { + for _, arg := range args { + if arg.Name == "" { + arg.Name = "$" + strconv.Itoa(arg.Ordinal) + } + + sp.AddAttributes( + trace.StringAttribute("sql.arg["+strconv.Quote(arg.Name)+"]", fmt.Sprintf("%v", arg.Value)), + ) + } + } +} + +func (c *_Conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (res driver.Result, err error) { + cec, cecOk := c.conn.(driver.ExecerContext) + //lint:ignore SA1019 We have to fallback if the wrapped driver doesn't implement ExecerContext. + ce, ceOk := c.conn.(driver.Execer) + if !cecOk && !ceOk { + return nil, driver.ErrSkip + } + + ctx, sp := c.startSpan(ctx, "SQL.Exec") + defer sp.End() + c.annotateSpan(query, args, sp) + + if cecOk { + res, err = cec.ExecContext(ctx, query, args) + } else { + res, err = ce.Exec(query, getValue(args)) + } + errSpan(err, sp) + + return res, err +} + +func (c *_Conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (rows driver.Rows, err error) { + cqc, cqcOk := c.conn.(driver.QueryerContext) + //lint:ignore SA1019 We have to fallback if the wrapped driver doesn't implement QueryerContext. 
+ cq, cqOk := c.conn.(driver.Queryer) + if !cqcOk && !cqOk { + return nil, driver.ErrSkip + } + + ctx, sp := c.startSpan(ctx, "SQL.Query") + c.annotateSpan(query, args, sp) + if cqcOk { + rows, err = cqc.QueryContext(ctx, query, args) + } else { + rows, err = cq.Query(query, getValue(args)) + } + errSpan(err, sp) + if err != nil { + sp.End() + return nil, err + } + + return &_Rows{Rows: rows, sp: sp}, nil +} diff --git a/sqltrace/connector.go b/sqltrace/connector.go new file mode 100644 index 0000000000..38d341b27c --- /dev/null +++ b/sqltrace/connector.go @@ -0,0 +1,23 @@ +package sqltrace + +import ( + "context" + "database/sql/driver" + + "go.opencensus.io/trace" +) + +type _Connector struct { + dbc driver.Connector + drv *_Driver + + attrs []trace.Attribute +} + +func (c *_Connector) Connect(ctx context.Context) (driver.Conn, error) { + conn, err := c.dbc.Connect(ctx) + return &_Conn{conn: conn, drv: c.drv, attrs: c.attrs}, err +} +func (c *_Connector) Driver() driver.Driver { + return c.drv +} diff --git a/sqltrace/driver.go b/sqltrace/driver.go new file mode 100644 index 0000000000..587c858885 --- /dev/null +++ b/sqltrace/driver.go @@ -0,0 +1,50 @@ +package sqltrace + +import ( + "database/sql/driver" +) + +type _Driver struct { + drv driver.Driver + includeQuery bool + includeArgs bool +} + +// WrapOptions allow specifying additional information to include in the trace. +type WrapOptions struct { + Query bool // include the SQL query + Args bool // include the arguments passed +} + +// WrapDriver will wrap a database driver with tracing information. 
+func WrapDriver(drv driver.Driver, opts *WrapOptions) driver.DriverContext { + if opts == nil { + opts = &WrapOptions{} + } + return &_Driver{drv: drv, includeArgs: opts.Args, includeQuery: opts.Query} +} + +func (d *_Driver) Open(name string) (driver.Conn, error) { + attrs, err := getConnAttributes(name) + if err != nil { + return nil, err + } + c, err := d.drv.Open(name) + return &_Conn{conn: c, drv: d, attrs: attrs}, err +} + +func (d *_Driver) OpenConnector(name string) (driver.Connector, error) { + attrs, err := getConnAttributes(name) + if err != nil { + return nil, err + } + if dc, ok := d.drv.(driver.DriverContext); ok { + dbc, err := dc.OpenConnector(name) + return &_Connector{dbc: dbc, drv: d, attrs: attrs}, err + } + return newSimpleConnector(d, name) +} + +func (d *_Driver) Driver() driver.Driver { + return d +} diff --git a/sqltrace/rows.go b/sqltrace/rows.go new file mode 100644 index 0000000000..d3736204c1 --- /dev/null +++ b/sqltrace/rows.go @@ -0,0 +1,20 @@ +package sqltrace + +import ( + "database/sql/driver" + + "go.opencensus.io/trace" +) + +type _Rows struct { + driver.Rows + sp *trace.Span +} + +func (r *_Rows) Next(dest []driver.Value) error { + return errSpan(r.Rows.Next(dest), r.sp) +} +func (r *_Rows) Close() error { + defer r.sp.End() + return errSpan(r.Rows.Close(), r.sp) +} diff --git a/sqltrace/simpleconnector.go b/sqltrace/simpleconnector.go new file mode 100644 index 0000000000..80d80789df --- /dev/null +++ b/sqltrace/simpleconnector.go @@ -0,0 +1,21 @@ +package sqltrace + +import ( + "context" + "database/sql/driver" +) + +type simpleConnector struct { + name string + drv *_Driver +} + +func newSimpleConnector(drv *_Driver, name string) (*simpleConnector, error) { + return &simpleConnector{name: name, drv: drv}, nil +} +func (c *simpleConnector) Driver() driver.Driver { + return c.drv +} +func (c *simpleConnector) Connect(ctx context.Context) (driver.Conn, error) { + return c.drv.Open(c.name) +} diff --git a/sqltrace/stmt.go 
b/sqltrace/stmt.go new file mode 100644 index 0000000000..f76a0d6d82 --- /dev/null +++ b/sqltrace/stmt.go @@ -0,0 +1,90 @@ +package sqltrace + +import ( + "context" + "database/sql/driver" + "io" + + "github.com/lib/pq" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +type _Stmt struct { + driver.Stmt + query string + conn *_Conn +} + +var _ driver.Stmt = &_Stmt{} +var _ driver.StmtExecContext = &_Stmt{} +var _ driver.StmtQueryContext = &_Stmt{} + +func getValue(args []driver.NamedValue) []driver.Value { + values := make([]driver.Value, len(args)) + for i, arg := range args { + values[i] = arg.Value + } + return values +} +func errSpan(err error, sp *trace.Span) error { + if err == nil { + return nil + } + if err == io.EOF { + return err + } + + attrs := []trace.Attribute{trace.BoolAttribute("error", true)} + + if pErr, ok := errors.Cause(err).(*pq.Error); ok { + attrs = append(attrs, + trace.StringAttribute("pq.error.detail", pErr.Detail), + trace.StringAttribute("pq.error.hint", pErr.Hint), + trace.StringAttribute("pq.error.code.name", pErr.Code.Name()), + trace.StringAttribute("pq.error.code", string(pErr.Code)), + trace.StringAttribute("pq.error.table", pErr.Table), + trace.StringAttribute("pq.error.constraint", pErr.Constraint), + trace.StringAttribute("pq.error.where", pErr.Where), + trace.StringAttribute("pq.error.column", pErr.Column), + ) + } + sp.Annotate(attrs, err.Error()) + + return err +} + +func (s *_Stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (res driver.Result, err error) { + ctx, sp := s.conn.startSpan(ctx, "SQL.Stmt.Exec") + defer sp.End() + s.conn.annotateSpan(s.query, args, sp) + + if sec, ok := s.Stmt.(driver.StmtExecContext); ok { + res, err = sec.ExecContext(ctx, args) + } else { + //lint:ignore SA1019 We have to fallback if the wrapped driver doesn't implement StmtExecContext. 
+ res, err = s.Stmt.Exec(getValue(args)) + } + errSpan(err, sp) + + return res, err +} + +func (s *_Stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (rows driver.Rows, err error) { + ctx, sp := s.conn.startSpan(ctx, "SQL.Stmt.Query") + s.conn.annotateSpan(s.query, args, sp) + + if sqc, ok := s.Stmt.(driver.StmtQueryContext); ok { + rows, err = sqc.QueryContext(ctx, args) + } else { + //lint:ignore SA1019 We have to fallback if the wrapped driver doesn't implement StmtQueryContext. + rows, err = s.Stmt.Query(getValue(args)) + } + errSpan(err, sp) + if err != nil { + sp.End() + return nil, err + } + + return &_Rows{Rows: rows, sp: sp}, nil +} diff --git a/sqltrace/tx.go b/sqltrace/tx.go new file mode 100644 index 0000000000..12a4e3c2ad --- /dev/null +++ b/sqltrace/tx.go @@ -0,0 +1,31 @@ +package sqltrace + +import ( + "context" + "database/sql/driver" + + "go.opencensus.io/trace" +) + +type _Tx struct { + conn *_Conn + tx driver.Tx + ctx context.Context +} + +func (tx *_Tx) Rollback() error { + _, sp := trace.StartSpan(tx.ctx, "SQL.Tx.Rollback") + err := errSpan(tx.tx.Rollback(), sp) + sp.End() + tx.conn.span.End() + tx.conn.span = nil + return err +} +func (tx *_Tx) Commit() error { + _, sp := trace.StartSpan(tx.ctx, "SQL.Tx.Commit") + err := errSpan(tx.tx.Commit(), sp) + sp.End() + tx.conn.span.End() + tx.conn.span = nil + return err +} diff --git a/switchover/dbstate.go b/switchover/dbstate.go new file mode 100644 index 0000000000..abe5cc6e84 --- /dev/null +++ b/switchover/dbstate.go @@ -0,0 +1,51 @@ +package switchover + +import ( + "context" + "database/sql" + "database/sql/driver" + "time" +) + +type dbState struct { + timeOffset time.Duration + dbc driver.Connector + db *sql.DB +} + +func newDBState(ctx context.Context, dbc driver.Connector) (*dbState, error) { + db := sql.OpenDB(dbc) + offset, err := CalcDBOffset(ctx, db) + if err != nil { + db.Close() + return nil, err + } + + return &dbState{ + dbc: dbc, + db: db, + timeOffset: offset, + }, 
nil +} + +func CalcDBOffset(ctx context.Context, db *sql.DB) (time.Duration, error) { + s, err := db.PrepareContext(ctx, `select now()`) + if err != nil { + return 0, err + } + defer s.Close() + + // pre-run the query to reduce error of first run + s.ExecContext(ctx) + + var sum time.Duration + var t time.Time + for i := 0; i < 10; i++ { + err = s.QueryRowContext(ctx).Scan(&t) + if err != nil { + return 0, err + } + sum += time.Until(t) + } + return sum / 10, err +} diff --git a/switchover/dbsync/ctxshell.go b/switchover/dbsync/ctxshell.go new file mode 100644 index 0000000000..4eb7b69221 --- /dev/null +++ b/switchover/dbsync/ctxshell.go @@ -0,0 +1,75 @@ +package dbsync + +import ( + "context" + "flag" + "os" + "os/signal" + "sync" + + "github.com/abiosoft/ishell" +) + +type ctxShell struct { + *ishell.Shell + mx sync.Mutex + + ctx context.Context + cancel func() +} +type ctxCmd struct { + Name, Help string + HasFlags bool + Func func(context.Context, *ishell.Context) error +} + +func newCtxShell() *ctxShell { + sh := &ctxShell{Shell: ishell.New()} + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + for { + <-ch + sh.mx.Lock() + sh.cancel() + sh.ctx, sh.cancel = context.WithCancel(context.Background()) + sh.mx.Unlock() + } + }() + sh.ctx, sh.cancel = context.WithCancel(context.Background()) + sh.Interrupt(func(c *ishell.Context, count int, input string) { + if count > 1 { + sh.Stop() + return + } + c.Println("Interrupt") + c.Println("Press CTRL+C again to quit.") + }) + return sh +} + +func (sh *ctxShell) AddCmd(cmd ctxCmd) { + sh.Shell.AddCmd(&ishell.Cmd{ + Name: cmd.Name, + Help: cmd.Help, + Func: func(c *ishell.Context) { + sh.mx.Lock() + ctx := sh.ctx + sh.mx.Unlock() + var err error + if !cmd.HasFlags { + fset := flag.NewFlagSet(cmd.Name, flag.ContinueOnError) + err = fset.Parse(c.Args) + } + if err == nil { + err = cmd.Func(ctx, c) + } + if err == flag.ErrHelp { + err = nil + } + if err != nil { + c.Println("ERROR:", err) + } 
+ }, + }) +} diff --git a/switchover/dbsync/diffsync.go b/switchover/dbsync/diffsync.go new file mode 100644 index 0000000000..b8707eefb3 --- /dev/null +++ b/switchover/dbsync/diffsync.go @@ -0,0 +1,154 @@ +package dbsync + +import ( + "context" + "fmt" + "time" + + "github.com/jackc/pgx" + "github.com/pkg/errors" + "github.com/vbauerster/mpb" + "github.com/vbauerster/mpb/decor" +) + +const batchSize = 100 + +type decorElapsedDone time.Time + +func (d decorElapsedDone) Decor(stats *decor.Statistics) string { + if !stats.Completed { + return "" + } + return " in " + time.Since(time.Time(d)).String() +} +func (d decorElapsedDone) Syncable() (bool, chan int) { + return false, nil +} + +func (s *Sync) diffSync(ctx context.Context, txSrc, txDst *pgx.Tx, dstChange int) error { + start := time.Now() + rows, err := txSrc.QueryEx(ctx, ` + with tx_max_id as ( + select max(id), tx_id + from change_log + where id > $1 + group by tx_id + ) + select id, op, table_name, row_id, row_data + from change_log c + join tx_max_id max_id on max_id.tx_id = c.tx_id + where c.id > $1 + order by + max_id.max, + cmd_id::text::int + `, nil, dstChange) + if err != nil { + return errors.Wrap(err, "get changed rows") + } + defer rows.Close() + type change struct { + ID int + OP string + Table string + RowID string + RowData []byte + } + + var changes []change + for rows.Next() { + var c change + err = rows.Scan(&c.ID, &c.OP, &c.Table, &c.RowID, &c.RowData) + if err != nil { + return errors.Wrap(err, "read change") + } + changes = append(changes, c) + } + rows.Close() + fmt.Printf("Fetched %d changes in %s\n", len(changes), time.Since(start)) + + start = time.Now() + // prepare statements + for _, c := range changes { + name := c.OP + ":" + c.Table + var query string + switch c.OP { + case "DELETE": + query = s.table(c.Table).DeleteOneRow() + case "INSERT": + query = s.table(c.Table).InsertOneRow() + case "UPDATE": + query = s.table(c.Table).UpdateOneRow() + } + _, err = 
txDst.PrepareEx(ctx, name, query, nil) + if err != nil { + return errors.Wrap(err, "prepare statement") + } + } + + _, err = txDst.PrepareEx(ctx, "_ins:change_log", ` + insert into change_log (id, op, table_name, row_id) + values ($1, $2, $3, $4) + `, nil) + if err != nil { + return errors.Wrap(err, "prepare statement") + } + fmt.Println("Prepared statements in", time.Since(start)) + + p := mpb.New() + bar := p.AddBar(int64(len(changes)), + mpb.BarClearOnComplete(), + mpb.PrependDecorators( + decor.CountersNoUnit("Synced %d of %d changes"), + ), + mpb.AppendDecorators( + decorElapsedDone(time.Now()), + ), + ) + + var batchCount int + b := txDst.BeginBatch() + for _, c := range changes { + switch c.OP { + case "DELETE": + b.Queue(c.OP+":"+c.Table, []interface{}{c.RowID}, nil, nil) + case "INSERT": + b.Queue(c.OP+":"+c.Table, []interface{}{c.RowData}, nil, nil) + case "UPDATE": + b.Queue(c.OP+":"+c.Table, []interface{}{c.RowID, c.RowData}, nil, nil) + } + b.Queue("_ins:change_log", []interface{}{c.ID, c.OP, c.Table, c.RowID}, nil, nil) + batchCount++ + if batchCount >= batchSize { + err = b.Send(ctx, nil) + if err != nil { + return errors.Wrap(err, "send batched commands") + } + err = b.Close() + if err != nil { + p.Abort(bar, false) + p.Wait() + fmt.Println("SYNC", c.ID) + return err + } + bar.IncrBy(batchCount) + b = txDst.BeginBatch() + batchCount = 0 + } + } + if batchCount > 0 { + err = b.Send(ctx, nil) + if err != nil { + return errors.Wrap(err, "sync") + } + err = b.Close() + if err != nil { + p.Abort(bar, false) + p.Wait() + return errors.Wrap(err, "sync") + } + bar.IncrBy(batchCount) + } + + p.Wait() + return nil +} diff --git a/switchover/dbsync/initsync.go b/switchover/dbsync/initsync.go new file mode 100644 index 0000000000..acc286900c --- /dev/null +++ b/switchover/dbsync/initsync.go @@ -0,0 +1,101 @@ +package dbsync + +import ( + "bufio" + "context" + "fmt" + "io" + + "github.com/jackc/pgx" + "github.com/pkg/errors" + 
"github.com/vbauerster/mpb" + "github.com/vbauerster/mpb/decor" +) + +func (s *Sync) initialSync(ctx context.Context, txSrc, txDst *pgx.Tx) error { + p := mpb.New() + var err error + var totalRows int64 + var bars []*mpb.Bar + var toSync []Table + scanBar := p.AddBar(int64(len(s.tables)), + mpb.BarRemoveOnComplete(), + mpb.BarPriority(9999), + mpb.PrependDecorators( + decor.CountersNoUnit("Scanning tables (%d of %d)...", decor.WCSyncSpaceR), + ), + ) + for _, t := range s.tables { + + var rowCount int64 + err := txSrc.QueryRowEx(ctx, `select count(*) from `+t.SafeName(), nil).Scan(&rowCount) + if err != nil { + return err + } + scanBar.Increment() + if rowCount == 0 { + continue + } + totalRows += rowCount + bars = append(bars, p.AddBar(int64(rowCount), + mpb.BarClearOnComplete(), + mpb.PrependDecorators( + decor.Name(t.Name, decor.WCSyncSpaceR), + ), + mpb.AppendDecorators( + decor.OnComplete(decor.Percentage(), "Done"), + ), + )) + toSync = append(toSync, t) + } + tBar := p.AddBar(int64(totalRows), + mpb.BarClearOnComplete(), + mpb.PrependDecorators( + decor.CountersNoUnit("Synced %d of %d rows", decor.WCSyncSpaceR), + ), + ) + abort := func(i int) { + for ; i < len(toSync); i++ { + p.Abort(bars[i], false) + } + p.Abort(tBar, false) + p.Wait() + } + + for i, t := range toSync { + err = func() error { + defer tBar.Increment() + + pr, pw := io.Pipe() + bw := bufio.NewWriter(pw) + br := bufio.NewReader(pr) + errCh := make(chan error, 2) + go func() { + defer pw.Close() + defer bw.Flush() + errCh <- errors.Wrap(txSrc.CopyToWriter(pw, fmt.Sprintf(`copy %s to stdout`, t.SafeName())), "read from src") + }() + go func() { + r := io.TeeReader(br, &progWrite{inc1: tBar.IncrBy, inc2: bars[i].IncrBy}) + errCh <- errors.Wrap(txDst.CopyFromReader(r, fmt.Sprintf(`copy %s from stdin`, t.SafeName())), "write to dst") + }() + err = <-errCh + if err != nil { + return err + } + err = <-errCh + if err != nil { + return err + } + + return nil + }() + if err != nil { + abort(i) + 
return err + } + } + + p.Wait() + return nil +} diff --git a/switchover/dbsync/listen.go b/switchover/dbsync/listen.go new file mode 100644 index 0000000000..4a3baf1c32 --- /dev/null +++ b/switchover/dbsync/listen.go @@ -0,0 +1,50 @@ +package dbsync + +import ( + "context" + "fmt" + "github.com/target/goalert/switchover" + "time" + + "github.com/jackc/pgx/stdlib" +) + +func (s *Sync) listen() { + for { + // ignoring errors (will reconnect) + err := func() error { + c, err := stdlib.AcquireConn(s.oldDB) + if err != nil { + return err + } + defer stdlib.ReleaseConn(s.oldDB, c) + + err = c.Listen(switchover.StateChannel) + if err != nil { + return err + } + + for { + n, err := c.WaitForNotification(context.Background()) + if err != nil { + return err + } + stat, err := switchover.ParseStatus(n.Payload) + if err != nil { + fmt.Println("ERROR:", err) + continue + } + + s.mx.Lock() + s.nodeStatus[stat.NodeID] = *stat + s.mx.Unlock() + select { + case s.statChange <- struct{}{}: + default: + } + } + }() + fmt.Println("ERROR:", err) + time.Sleep(time.Second) + } +} diff --git a/switchover/dbsync/sequences.go b/switchover/dbsync/sequences.go new file mode 100644 index 0000000000..4a0215b833 --- /dev/null +++ b/switchover/dbsync/sequences.go @@ -0,0 +1,57 @@ +package dbsync + +import ( + "context" + + "github.com/jackc/pgx" + "github.com/jackc/pgx/pgtype" + "github.com/pkg/errors" +) + +func (s *Sync) syncSequences(ctx context.Context, txSrc, txDst *pgx.Tx) error { + rows, err := txSrc.QueryEx(ctx, ` + select sequence_name + from information_schema.sequences + where + sequence_catalog = current_database() and + sequence_schema = 'public' + `, nil) + if err != nil { + return errors.Wrap(err, "get sequence names") + } + defer rows.Close() + var names []string + for rows.Next() { + var name string + err = rows.Scan(&name) + if err != nil { + return errors.Wrap(err, "scan sequence name") + } + names = append(names, name) + } + rows.Close() + batchRead := 
txSrc.BeginBatch() + for _, name := range names { + batchRead.Queue(`select last_value, is_called from `+pgx.Identifier{name}.Sanitize(), nil, nil, []int16{pgx.BinaryFormatCode, pgx.BinaryFormatCode}) + } + err = batchRead.Send(ctx, nil) + if err != nil { + return errors.Wrap(err, "send src sequence queries") + } + batch := txDst.BeginBatch() + for _, name := range names { + var lastVal int64 + var called bool + err = batchRead.QueryRowResults().Scan(&lastVal, &called) + if err != nil { + return errors.Wrapf(err, "get src sequence state for '%s'", name) + } + batch.Queue(`select pg_catalog.setval($1, $2, $3)`, []interface{}{name, lastVal, called}, []pgtype.OID{pgtype.TextOID, pgtype.Int8OID, pgtype.BoolOID}, nil) + } + err = batch.Send(ctx, nil) + if err != nil { + return errors.Wrap(err, "update dst sequence state") + } + + return errors.Wrap(batch.Close(), "update dst sequence state") +} diff --git a/switchover/dbsync/shell.go b/switchover/dbsync/shell.go new file mode 100644 index 0000000000..c5c8e89aa3 --- /dev/null +++ b/switchover/dbsync/shell.go @@ -0,0 +1,432 @@ +package dbsync + +import ( + "context" + "database/sql" + "flag" + "fmt" + "github.com/target/goalert/migrate" + "github.com/target/goalert/switchover" + "net/url" + "strings" + "time" + + "github.com/vbauerster/mpb" + "github.com/vbauerster/mpb/decor" + + "github.com/abiosoft/ishell" + _ "github.com/jackc/pgx/stdlib" // load PGX driver + "github.com/pkg/errors" +) + +// RunShell will start the switchover shell. 
+func RunShell(oldURL, newURL string) error { + ctx := context.Background() + u, err := url.Parse(oldURL) + if err != nil { + return errors.Wrap(err, "parse old URL") + } + q := u.Query() + q.Set("application_name", "GoAlert Switch-Over Shell") + u.RawQuery = q.Encode() + oldURL = u.String() + + u, err = url.Parse(newURL) + if err != nil { + return errors.Wrap(err, "parse new URL") + } + q = u.Query() + q.Set("application_name", "GoAlert Switch-Over Shell") + u.RawQuery = q.Encode() + newURL = u.String() + + db, err := sql.Open("pgx", oldURL) + if err != nil { + return errors.Wrap(err, "open DB") + } + + var numMigrations int + err = db.QueryRowContext(ctx, `select count(*) from gorp_migrations`).Scan(&numMigrations) + if err != nil { + return errors.Wrap(err, "validate migration number") + } + if numMigrations != len(migrate.Names()) { + return errors.Errorf("got %d migrations but expected %d", numMigrations, len(migrate.Names())) + } + + fmt.Println("Applying migrations to next-db...") + dbNew, err := sql.Open("pgx", newURL) + if err != nil { + return errors.Wrap(err, "open next-DB") + } + _, err = migrate.ApplyAll(ctx, dbNew) + if err != nil { + return errors.Wrap(err, "migrate next-DB") + } + dbNew.Close() + + dbNew, err = sql.Open("pgx", newURL) + if err != nil { + return errors.Wrap(err, "open next-DB") + } + sendNotif, err := db.PrepareContext(ctx, `select pg_notify($1, $2)`) + if err != nil { + return errors.Wrap(err, "prepare notify statement") + } + + s, err := NewSync(ctx, db, dbNew, newURL) + if err != nil { + return errors.Wrap(err, "init sync manager") + } + + sh := newCtxShell() + sh.AddCmd(ctxCmd{ + Name: "sync", + Help: "Execute DB sync.", + HasFlags: true, + Func: func(ctx context.Context, sh *ishell.Context) error { + fset := flag.NewFlagSet("sync", flag.ContinueOnError) + cont := fset.Bool("continuous", false, "Perform continuous sync (up to once per second).") + err := fset.Parse(sh.Args) + if err != nil { + return err + } + + if *cont { + 
sh.Print("\033[H\033[2J") + } + start := time.Now() + err = s.Sync(ctx, false, false) + if err != nil { + return err + } + sh.Printf("Completed sync in %s\n", time.Since(start).Truncate(time.Millisecond).String()) + + if !*cont { + return nil + } + + t := time.NewTicker(time.Second) + for { + select { + case <-t.C: + if *cont { + sh.Print("\033[H\033[2J") + } + start := time.Now() + err = s.Sync(ctx, false, false) + if err != nil { + return err + } + sh.Printf("Completed sync in %s\n", time.Since(start).Truncate(time.Millisecond).String()) + case <-ctx.Done(): + return nil + } + } + + }, + }) + sh.AddCmd(ctxCmd{ + Name: "enable", + Help: "Enable change_log", + Func: func(ctx context.Context, sh *ishell.Context) error { + res, err := db.ExecContext(ctx, `update switchover_state set current_state = 'in_progress' where current_state = 'idle'`) + if err != nil { + return err + } + r, err := res.RowsAffected() + if err != nil { + return err + } + if r != 1 { + return errors.New("not idle") + } + + status, err := s.status(ctx) + if err != nil { + return err + } + sh.Println(status) + sh.Println("change_log enabled.") + + return nil + }, + }) + sh.AddCmd(ctxCmd{ + Name: "disable", + Help: "Enable change_log", + Func: func(ctx context.Context, sh *ishell.Context) error { + res, err := db.ExecContext(ctx, `update switchover_state set current_state = 'idle' where current_state = 'in_progress'`) + if err != nil { + return err + } + r, err := res.RowsAffected() + if err != nil { + return err + } + if r != 1 { + return errors.New("not in_progress") + } + + status, err := s.status(ctx) + if err != nil { + return err + } + sh.Println(status) + sh.Println("change_log disabled") + return nil + }, + }) + + sh.AddCmd(ctxCmd{ + Name: "reset", + Help: "Reset node status", + Func: func(ctx context.Context, sh *ishell.Context) error { + s.mx.Lock() + for key := range s.nodeStatus { + delete(s.nodeStatus, key) + } + s.mx.Unlock() + _, err := sendNotif.ExecContext(ctx, 
switchover.ControlChannel, "reset") + if err != nil { + return err + + } + + status, err := s.status(ctx) + if err != nil { + return err + } + sh.Println(status) + sh.Println("Reset signal sent.") + + return nil + }, + }) + sh.AddCmd(ctxCmd{ + Name: "status", + Help: "Print current status.", + HasFlags: true, + Func: func(ctx context.Context, sh *ishell.Context) error { + fset := flag.NewFlagSet("status", flag.ContinueOnError) + watch := fset.Bool("w", false, "Watch mode.") + dur := fset.Duration("n", 2*time.Second, "Time between updates") + err := fset.Parse(sh.Args) + if err != nil { + return err + } + + status, err := s.status(ctx) + if err != nil { + return err + } + if *watch { + sh.Print("\033[H\033[2J") + } + sh.Println(status) + if !*watch { + return nil + } + + t := time.NewTicker(*dur) + for { + select { + case <-t.C: + status, err := s.status(ctx) + if err != nil { + return err + } + sh.Print("\033[H\033[2J") + sh.Println(status) + case <-ctx.Done(): + return nil + } + } + }, + }) + sh.AddCmd(ctxCmd{ + Name: "execute", + HasFlags: true, + Help: "Execute the switchover procedure.", + Func: func(ctx context.Context, sh *ishell.Context) error { + cfg := switchover.DefaultConfig() + fset := flag.NewFlagSet("execute", flag.ContinueOnError) + fset.BoolVar(&cfg.NoPauseAPI, "allow-api", cfg.NoPauseAPI, "Allow API requests during pause phase (DB calls will still pause during final sync).") + fset.DurationVar(&cfg.ConsensusTimeout, "consensus-timeout", cfg.ConsensusTimeout, "Timeout to reach consensus.") + fset.DurationVar(&cfg.PauseDelay, "pause-delay", cfg.PauseDelay, "Delay from start until global pause begins.") + fset.DurationVar(&cfg.PauseTimeout, "pause-timeout", cfg.PauseTimeout, "Timeout to achieve global pause.") + fset.DurationVar(&cfg.MaxPause, "max-pause", cfg.MaxPause, "Maximum duration for any pause/delay/impact during switchover.") + extraSync := fset.Bool("extra-sync", false, "Do a second sync after pausing, immediately before the final sync 
(useful with -allow-api).") + noSwitch := fset.Bool("no-switch", false, "Run the entire procedure, but omit the final use_next_db update.") + err := fset.Parse(sh.Args) + if err != nil { + if err == flag.ErrHelp { + return nil + } + return err + } + + status, err := s.status(ctx) + if err != nil { + return errors.Wrap(err, "get status") + } + + details := new(strings.Builder) + + pauseAPI := "yes" + if cfg.NoPauseAPI { + pauseAPI = "no" + } + fmt.Fprintln(details, status) + fmt.Fprintln(details, "Switch-Over Details") + fmt.Fprintln(details, " Pause API Requests:", pauseAPI) + fmt.Fprintln(details, " Consensus Timeout :", cfg.ConsensusTimeout) + fmt.Fprintln(details, " Pause Starts After:", cfg.PauseDelay) + fmt.Fprintln(details, " Pause Timeout :", cfg.PauseTimeout) + fmt.Fprintln(details, " Absolute Max Pause:", cfg.MaxPause) + fmt.Fprintln(details, " Avail. Sync Time :", cfg.MaxPause-2*time.Second-cfg.PauseTimeout, "-", cfg.MaxPause-2*time.Second) + fmt.Fprintln(details, " Max Alloted Time :", cfg.PauseDelay+cfg.MaxPause) + fmt.Fprintln(details) + fmt.Fprintln(details, "Ready?") + + if sh.MultiChoice([]string{"Cancel", "Go!"}, details.String()) != 1 { + sh.Println() + return nil + } + + start := time.Now() + err = s.Sync(ctx, false, false) + if err != nil { + return err + } + sh.Printf("Completed sync in %s\n", time.Since(start).Truncate(time.Second/10).String()) + + nodes := s.NodeStatus() + n := len(nodes) + if n == 0 { + return errors.New("no nodes are available") + } + + if !s.Ready() { + return errors.New("all nodes are not ready") + } + + for _, stat := range nodes { + if !stat.MatchDBNext(newURL) { + return errors.New("one or more nodes (or this shell) have mismatched config, check db-url-next") + } + if stat.At.Before(time.Now().Add(-5 * time.Second)) { + return errors.New("one or more nodes have timed out (try reset)") + } + } + + p := mpb.New() + var done bool + abort := func() { + if !done { + sh.Println("ABORT") + 
sendNotif.ExecContext(context.Background(), switchover.ControlChannel, "abort") + } + } + defer abort() + + cfg.BeginAt = time.Now() + + sh.Println() + sh.Println("Switch-Over Start ::", cfg.BeginAt.Format(time.StampMilli)) + sh.Println() + + swDeadline := cfg.AbsoluteDeadline().Add(-2 * time.Second) + ctx, cancel := context.WithDeadline(ctx, swDeadline) + defer cancel() + + _, err = sendNotif.ExecContext(ctx, switchover.ControlChannel, cfg.Serialize(s.Offset())) + if err != nil { + return errors.Wrap(err, "send control message") + } + + cBar := p.AddBar(int64(n), + mpb.PrependDecorators(decor.Name("Consensus", decor.WCSyncSpaceR)), + mpb.BarClearOnComplete(), + mpb.AppendDecorators( + decor.OnComplete(decor.CountersNoUnit("(%d of %d nodes)", decor.WCSyncSpaceR), "Done"), + ), + ) + + cCtx, cCancel := context.WithDeadline(ctx, cfg.ConsensusDeadline()) + defer cCancel() + err = s.NodeStateWait(cCtx, n, cBar, switchover.StateArmed, switchover.StateArmWait) + if err != nil { + p.Abort(cBar, false) + p.Wait() + return errors.Wrap(err, "wait for consensus") + } + p.Wait() + + t := time.NewTicker(time.Second) + tE := time.NewTimer(time.Until(cfg.PauseAt())) + waitLoop: + for { + dur := time.Until(cfg.PauseAt()).Truncate(time.Second) + if dur >= time.Second { + sh.Printf("Stop-The-World Pause begins in %ds...\n", dur/time.Second) + } else { + break + } + select { + case <-t.C: + case <-tE.C: + break waitLoop + case <-ctx.Done(): + return ctx.Err() + } + } + + p = mpb.New() + pBar := p.AddBar(int64(n), + mpb.PrependDecorators(decor.Name("STW Pause", decor.WCSyncSpaceR)), + mpb.BarClearOnComplete(), + mpb.AppendDecorators( + decor.OnComplete(decor.CountersNoUnit("(%d of %d nodes)", decor.WCSyncSpaceR), "Done"), + ), + ) + pCtx, pCancel := context.WithDeadline(ctx, cfg.PauseDeadline()) + defer pCancel() + err = s.NodeStateWait(pCtx, n, pBar, switchover.StatePaused, switchover.StatePauseWait) + if err != nil { + p.Abort(pBar, false) + p.Wait() + return errors.Wrap(err, "wait 
for pause") + } + p.Wait() + + if *extraSync { + start = time.Now() + err = s.Sync(ctx, false, false) + if err != nil { + return err + } + sh.Printf("Completed extra sync in %s\n", time.Since(start).Truncate(time.Second/10).String()) + } + + sh.Println("Begin final synchronization") + err = s.Sync(ctx, true, !*noSwitch) + if err != nil { + return err + } + + if !*noSwitch { + sh.Println("Next DB is now permanently active, switchover complete.") + } + + _, err = sendNotif.ExecContext(ctx, switchover.ControlChannel, "done") + done = true + return errors.Wrap(err, "send done signal") + }, + }) + + fmt.Println("GoAlert Switch-Over Shell") + fmt.Println(sh.HelpText()) + sh.Run() + return nil +} diff --git a/switchover/dbsync/status.go b/switchover/dbsync/status.go new file mode 100644 index 0000000000..498971ea7e --- /dev/null +++ b/switchover/dbsync/status.go @@ -0,0 +1,104 @@ +package dbsync + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/alexeyco/simpletable" + "github.com/pkg/errors" +) + +func (s *Sync) status(ctx context.Context) (string, error) { + rows, err := s.oldDB.QueryContext(ctx, ` + select count(*), application_name, usename + from pg_stat_activity + where datname=current_database() + group by application_name, usename + order by application_name, usename + `) + if err != nil { + return "", errors.Wrap(err, "check DB connections") + } + defer rows.Close() + table := simpletable.New() + table.Header = &simpletable.Header{ + Cells: []*simpletable.Cell{ + {Text: "Application"}, + {Text: "Username"}, + {Text: "Connections"}, + }, + } + for rows.Next() { + var num int + var name string + var user string + err = rows.Scan(&num, &name, &user) + if err != nil { + return "", errors.Wrap(err, "scan query results") + } + table.Body.Cells = append(table.Body.Cells, []*simpletable.Cell{ + {Text: name}, + {Text: user}, + {Text: strconv.Itoa(num)}, + }) + } + rows.Close() + buf := new(strings.Builder) + 
buf.WriteString(table.String() + "\n\n") + + table = simpletable.New() + table.Header = &simpletable.Header{ + Cells: []*simpletable.Cell{ + {Text: "Node ID"}, + {Text: "Status"}, + {Text: "Offset"}, + {Text: "Config"}, + {Text: "Last Seen"}, + {Text: "ActiveRequests"}, + }, + } + nodes := s.NodeStatus() + for _, stat := range nodes { + cfg := "Valid" + if !stat.MatchDBNext(s.newURL) { + cfg = "Invalid" + } + table.Body.Cells = append(table.Body.Cells, []*simpletable.Cell{ + {Text: stat.NodeID}, + {Text: string(stat.State)}, + {Text: stat.Offset.String()}, + {Text: cfg}, + {Text: time.Since(stat.At).Truncate(time.Millisecond).String()}, + {Text: strconv.Itoa(stat.ActiveRequests)}, + }) + } + buf.WriteString(table.String() + "\n\n") + + fmt.Fprintf(buf, "Node Count: %d\n", len(nodes)) + fmt.Fprintln(buf, "Local offset:", s.Offset()) + var stat string + err = s.oldDB.QueryRowContext(ctx, `select current_state from switchover_state`).Scan(&stat) + if err != nil { + return "", errors.Wrap(err, "lookup switchover state") + } + + fmt.Fprintln(buf, "Switchover state:", stat) + + var changeMax int + err = s.oldDB.QueryRowContext(ctx, `select coalesce(max(id),0) from change_log`).Scan(&changeMax) + if err != nil { + return "", errors.Wrap(err, "lookup change id") + } + fmt.Fprintln(buf, "Max change_log ID:", changeMax) + + err = s.newDB.QueryRowContext(ctx, `select coalesce(max(id),0) from change_log`).Scan(&changeMax) + if err != nil { + return "", errors.Wrap(err, "lookup change id (new)") + } + fmt.Fprintln(buf, "Max change_log ID (next DB):", changeMax) + + return buf.String(), nil +} diff --git a/switchover/dbsync/sync.go b/switchover/dbsync/sync.go new file mode 100644 index 0000000000..938161695c --- /dev/null +++ b/switchover/dbsync/sync.go @@ -0,0 +1,337 @@ +package dbsync + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "github.com/target/goalert/lock" + "github.com/target/goalert/switchover" + "sort" + "strconv" + "sync" + "time" + + 
"github.com/jackc/pgx" + "github.com/jackc/pgx/stdlib" + "github.com/pkg/errors" + "github.com/vbauerster/mpb" +) + +type Sync struct { + oldDB, newDB *sql.DB + newURL string + oldOffset time.Duration + newOffset time.Duration + tables []Table + nodeStatus map[string]switchover.Status + mx sync.Mutex + statChange chan struct{} +} + +func NewSync(ctx context.Context, oldDB, newDB *sql.DB, newURL string) (*Sync, error) { + tables, err := Tables(ctx, oldDB) + if err != nil { + return nil, err + } + + oldOffset, err := switchover.CalcDBOffset(ctx, oldDB) + if err != nil { + return nil, err + } + + newOffset, err := switchover.CalcDBOffset(ctx, newDB) + if err != nil { + return nil, err + } + + s := &Sync{ + oldDB: oldDB, + newDB: newDB, + newURL: newURL, + tables: tables, + oldOffset: oldOffset, + newOffset: newOffset, + nodeStatus: make(map[string]switchover.Status), + statChange: make(chan struct{}), + } + go s.listen() + + return s, nil +} +func (s *Sync) Offset() time.Duration { + return s.oldOffset +} +func (s *Sync) NodeStatus() []switchover.Status { + var stat []switchover.Status + s.mx.Lock() + for _, st := range s.nodeStatus { + stat = append(stat, st) + } + s.mx.Unlock() + sort.Slice(stat, func(i, j int) bool { return stat[i].NodeID < stat[j].NodeID }) + return stat +} + +type WaitState struct { + Done int + Total int + Abort bool +} + +func (s *Sync) NodeStateWait(ctx context.Context, origTotal int, bar *mpb.Bar, anyState ...switchover.State) error { + for { + abort, n, total := s.nodeStateAll(anyState...) 
+ if abort { + return errors.New("node abort") + } + if total != origTotal { + return errors.New("new node appeared while waiting") + } + cur := bar.Current() + if n != cur { + bar.IncrBy(int(n-cur), time.Second) + } + if int(n) == total { + return nil + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-s.statChange: + continue + } + } +} +func (s *Sync) Ready() bool { + a, c, n := s.nodeStateAll(switchover.StateReady) + if a { + return false + } + if int(c) != n { + return false + } + if n == 0 { + return false + } + return true +} +func (s *Sync) nodeStateAll(anyState ...switchover.State) (bool, int64, int) { + s.mx.Lock() + defer s.mx.Unlock() + var count int64 +nodeCheck: + for _, stat := range s.nodeStatus { + if stat.State == switchover.StateAbort { + return true, 0, 0 + } + for _, state := range anyState { + if state == stat.State { + count++ + continue nodeCheck + } + } + } + return false, count, len(s.nodeStatus) +} +func (s *Sync) Aborted() bool { + s.mx.Lock() + defer s.mx.Unlock() + for _, stat := range s.nodeStatus { + if stat.State == switchover.StateAbort { + return true + } + } + return false +} + +type progWrite struct { + inc1 func(int, ...time.Duration) + inc2 func(int, ...time.Duration) +} + +func (w *progWrite) Write(p []byte) (int, error) { + n := bytes.Count(p, []byte{'\n'}) + w.inc1(n) + w.inc2(n) + return len(p), nil +} + +func (s *Sync) table(tableName string) Table { + for _, t := range s.tables { + if t.Name != tableName { + continue + } + return t + } + panic("unknown table: " + tableName) +} + +func (s *Sync) Sync(ctx context.Context, isFinal, enableSwitchOver bool) error { + var stat string + + srcConn, err := stdlib.AcquireConn(s.oldDB) + if err != nil { + return errors.Wrap(err, "get src conn") + } + defer stdlib.ReleaseConn(s.oldDB, srcConn) + defer srcConn.Close() + + var gotLock bool + if isFinal { + d, ok := ctx.Deadline() + if !ok { + return errors.New("context missing deadline for final sync") + } + + lockMs := 
int64(time.Until(d.Add(-time.Second)) / time.Millisecond) + if lockMs < 0 { + return errors.New("not enough time remaining for lock") + } + + _, err = srcConn.ExecEx(ctx, `set lock_timeout = `+strconv.FormatInt(lockMs, 10), nil) + if err != nil { + return errors.Wrap(err, "set lock_timeout") + } + _, err = srcConn.ExecEx(ctx, `select pg_advisory_lock($1)`, nil, lock.GlobalSwitchOver) + if err == nil { + gotLock = true + } + } else { + err = srcConn.QueryRowEx(ctx, `select pg_try_advisory_lock_shared($1)`, nil, lock.GlobalSwitchOver).Scan(&gotLock) + } + if err != nil { + return errors.Wrap(err, "acquire advisory lock") + } + if !gotLock { + return errors.New("failed to get lock") + } + + err = srcConn.QueryRowEx(ctx, `select current_state from switchover_state nowait`, nil).Scan(&stat) + if err != nil { + return errors.Wrap(err, "get current state") + } + if stat == "use_next_db" { + return errors.New("switchover already completed") + } + if stat == "idle" { + return errors.New("run enable first") + } + + dstConn, err := stdlib.AcquireConn(s.newDB) + if err != nil { + return errors.Wrap(err, "get dst conn") + } + defer stdlib.ReleaseConn(s.newDB, dstConn) + + start := time.Now() + txSrc, err := srcConn.BeginEx(ctx, &pgx.TxOptions{ + IsoLevel: pgx.Serializable, + AccessMode: pgx.ReadOnly, + DeferrableMode: pgx.Deferrable, + }) + if err != nil { + return errors.Wrap(err, "start src transaction") + } + defer txSrc.Rollback() + fmt.Println("Got src tx after", time.Since(start)) + + txDst, err := dstConn.BeginEx(ctx, &pgx.TxOptions{ + IsoLevel: pgx.Serializable, + AccessMode: pgx.ReadWrite, + DeferrableMode: pgx.Deferrable, + }) + if err != nil { + return errors.Wrap(err, "start dst transaction") + } + defer txDst.Rollback() + + _, err = txDst.ExecEx(ctx, `SET CONSTRAINTS ALL DEFERRED`, nil) + if err != nil { + return errors.Wrap(err, "defer constraints") + } + + var srcLastChange, dstLastChange int + err = txSrc.QueryRowEx(ctx, `select coalesce(max(id), 0) from 
change_log`, nil).Scan(&srcLastChange) + if err != nil { + return errors.Wrap(err, "check src last change") + } + err = txDst.QueryRowEx(ctx, `select coalesce(max(id), 0) from change_log`, nil).Scan(&dstLastChange) + if err != nil { + return errors.Wrap(err, "check dst last change") + } + if srcLastChange == 0 { + return errors.New("change_log (or no changes) on src DB") + } + + if !isFinal { + start = time.Now() + batch := txDst.BeginBatch() + for _, t := range s.tables { + batch.Queue(`alter table `+t.SafeName()+` disable trigger user`, nil, nil, nil) + } + err = batch.Send(ctx, nil) + if err != nil { + return errors.Wrap(err, "disable triggers (send)") + } + err = batch.Close() + if err != nil { + return errors.Wrap(err, "disable triggers") + } + fmt.Println("Disabled triggers in", time.Since(start)) + } + + if dstLastChange == 0 { + err = s.initialSync(ctx, txSrc, txDst) + } else if srcLastChange > dstLastChange { + err = s.diffSync(ctx, txSrc, txDst, dstLastChange) + } + if err != nil { + return errors.Wrap(err, "sync") + } + + start = time.Now() + err = s.syncSequences(ctx, txSrc, txDst) + if err != nil { + return errors.Wrap(err, "update sequence numbers") + } + fmt.Println("Updated sequences in", time.Since(start)) + + err = txSrc.Commit() + if err != nil { + return errors.Wrap(err, "commit src") + } + + err = txDst.Commit() + if err != nil { + return errors.Wrap(err, "commit dst") + } + + if isFinal { + start = time.Now() + batch := dstConn.BeginBatch() + for _, t := range s.tables { + batch.Queue(`alter table `+t.SafeName()+` enable trigger user`, nil, nil, nil) + } + err = batch.Send(ctx, nil) + if err != nil { + return errors.Wrap(err, "enable triggers (send)") + } + err = batch.Close() + if err != nil { + return errors.Wrap(err, "enable triggers") + } + fmt.Println("Re-enabled triggers in", time.Since(start)) + + if enableSwitchOver { + _, err = srcConn.ExecEx(ctx, `update switchover_state set current_state = 'use_next_db'`, nil) + if err != nil { + 
return errors.Wrap(err, "update state table") + } + fmt.Println("State updated: next-db is now active!") + } + } + + return nil +} diff --git a/switchover/dbsync/table.go b/switchover/dbsync/table.go new file mode 100644 index 0000000000..dd3ee3dd5b --- /dev/null +++ b/switchover/dbsync/table.go @@ -0,0 +1,217 @@ +package dbsync + +import ( + "context" + "database/sql" + "fmt" + "sort" + "strings" + + "github.com/jackc/pgx" + "github.com/pkg/errors" +) + +var ignoreTables = []string{ + "switchover_state", + "engine_processing_versions", + "gorp_migrations", +} + +type Table struct { + Name string + Columns []Column + IDCol Column + + DependsOn map[string]bool + DependantOf map[string]bool +} +type Column struct { + Name string + Type string + Ord int +} + +func contains(strs []string, s string) bool { + for _, str := range strs { + if str == s { + return true + } + } + return false +} + +func Tables(ctx context.Context, db *sql.DB) ([]Table, error) { + rows, err := db.QueryContext(ctx, ` + select col.table_name, col.column_name, col.data_type, col.ordinal_position + from information_schema.columns col + join information_schema.tables t on + t.table_catalog = col.table_catalog and + t.table_schema = col.table_schema and + t.table_name = col.table_name and + t.table_type = 'BASE TABLE' + where col.table_catalog = current_database() and col.table_schema = 'public' + `) + if err != nil { + return nil, err + } + defer rows.Close() + t := make(map[string]*Table) + for rows.Next() { + var col Column + var name string + err = rows.Scan(&name, &col.Name, &col.Type, &col.Ord) + if err != nil { + return nil, err + } + if contains(ignoreTables, name) { + continue + } + tbl := t[name] + if tbl == nil { + tbl = &Table{Name: name, DependsOn: make(map[string]bool), DependantOf: make(map[string]bool)} + t[name] = tbl + } + tbl.Columns = append(tbl.Columns, col) + if col.Name == "id" { + tbl.IDCol = col + } + } + + rows, err = db.QueryContext(ctx, ` + select src.relname, 
ref.relname + from pg_catalog.pg_constraint con + join pg_namespace ns on ns.nspname = 'public' and ns.oid = con.connamespace + join pg_class src on src.oid = con.conrelid + join pg_class ref on ref.oid = con.confrelid + where con.contype = 'f' and not con.condeferrable + `) + if err != nil { + return nil, errors.Wrap(err, "fetch non-deferrable dependencies") + } + defer rows.Close() + for rows.Next() { + var srcName, refName string + err = rows.Scan(&srcName, &refName) + if err != nil { + return nil, errors.Wrap(err, "scan non-deferrable dependency") + } + t[srcName].DependsOn[refName] = true + t[refName].DependantOf[srcName] = true + if t[refName].DependsOn[srcName] { + return nil, errors.Errorf("circular non-deferrable dependency between '%s' and '%s'", srcName, refName) + } + } + + var isRecursiveDep func(a, b string) bool + isRecursiveDep = func(a, b string) bool { + // if 'a' depends on 'b' + if t[a].DependsOn[b] { + return true + } + + // if a dep of 'a' depends on 'b' + for dep := range t[a].DependsOn { + if isRecursiveDep(dep, b) { + return true + } + } + + return false + } + var recursiveDependants func(Table) []Table + recursiveDependants = func(tbl Table) []Table { + var tables []Table + for name := range tbl.DependantOf { + tables = append(tables, *t[name]) + tables = append(tables, recursiveDependants(*t[name])...) 
+ } + return tables + } + + tables := make([]Table, 0, len(t)) + for _, tbl := range t { + sort.Slice(tbl.Columns, func(i, j int) bool { return tbl.Columns[i].Ord < tbl.Columns[j].Ord }) + tables = append(tables, *tbl) + } + + // sort by name + sort.Slice(tables, func(i, j int) bool { return tables[i].Name < tables[j].Name }) + + // sort by deps + depOrder := make([]Table, 0, len(tables)) + deps := make(map[string]bool) + for len(depOrder) < len(tables) { + tableLoop: + for _, t := range tables { + if deps[t.Name] { + continue + } + for depName := range t.DependsOn { + if !deps[depName] { + continue tableLoop + } + } + deps[t.Name] = true + depOrder = append(depOrder, t) + } + } + + return depOrder, nil +} + +func (c Column) IsInteger() bool { + switch c.Type { + case "integer", "bigint": + return true + } + return false +} +func (t Table) SafeName() string { + return pgx.Identifier{t.Name}.Sanitize() +} +func (t Table) ColumnNames() []string { + colNames := make([]string, len(t.Columns)) + for i, col := range t.Columns { + colNames[i] = col.Name + } + return colNames +} + +func (t Table) FetchOneRow() string { + return fmt.Sprintf(`select * from %s where id = cast($1 as %s)`, t.SafeName(), t.IDCol.Type) +} +func (t Table) DeleteOneRow() string { + return fmt.Sprintf(`delete from %s where id = cast($1 as %s)`, t.SafeName(), t.IDCol.Type) +} +func (t Table) InsertOneRow() string { + return fmt.Sprintf(` + insert into %s + select * from + json_populate_record(null::%s, $1) + as data + `, + t.SafeName(), + t.SafeName(), + ) +} + +func (t Table) UpdateOneRow() string { + cols := make([]string, 0, len(t.Columns)) + for _, col := range t.Columns { + if col.Name == "id" { + continue + } + cols = append(cols, fmt.Sprintf(`%s = data.%s`, pgx.Identifier{col.Name}.Sanitize(), pgx.Identifier{col.Name}.Sanitize())) + } + + return fmt.Sprintf(` + update %s dst + set %s + from (select * from json_populate_record(null::%s, $2)) as data + where dst.id = $1 + `, + t.SafeName(), + 
strings.Join(cols, ", "), + t.SafeName(), + ) +} diff --git a/switchover/deadlineconfig.go b/switchover/deadlineconfig.go new file mode 100644 index 0000000000..bc73223acd --- /dev/null +++ b/switchover/deadlineconfig.go @@ -0,0 +1,106 @@ +package switchover + +import ( + "context" + "net/url" + "time" +) + +type ctxValue int + +const ( + ctxValueDeadlines ctxValue = iota +) + +// DeadlineConfig controls the timeing of a Switch-Over operation. +type DeadlineConfig struct { + BeginAt time.Time // The start-time of the Switch-Over. + ConsensusTimeout time.Duration // Amount of time to wait for consensus amongst all nodes before aborting. + PauseDelay time.Duration // How long to wait after starting before beginning the global pause. + PauseTimeout time.Duration // Timeout to achieve global pause before aborting. + MaxPause time.Duration // Absolute maximum amount of time for any operation to be delayed due to the Switch-Over. + NoPauseAPI bool // Allow HTTP/API requests during Pause phase. +} + +// DefaultConfig returns the default deadline configuration. +func DefaultConfig() DeadlineConfig { + return DeadlineConfig{ + ConsensusTimeout: 3 * time.Second, + PauseDelay: 5 * time.Second, + PauseTimeout: 10 * time.Second, + MaxPause: 13 * time.Second, + } +} + +// ConfigFromContext returns the DeadlineConfig associated with the current context. +func ConfigFromContext(ctx context.Context) DeadlineConfig { + d, _ := ctx.Value(ctxValueDeadlines).(DeadlineConfig) + return d +} + +// PauseDeadline will return the deadline to achieve global pause. +func (cfg DeadlineConfig) PauseDeadline() time.Time { + return cfg.BeginAt.Add(cfg.PauseDelay + cfg.PauseTimeout) +} + +// ConsensusDeadline will return the deadline for consensus amonst all nodes. +func (cfg DeadlineConfig) ConsensusDeadline() time.Time { + return cfg.BeginAt.Add(cfg.ConsensusTimeout) +} + +// PauseAt will return the time global pause begins. 
+func (cfg DeadlineConfig) PauseAt() time.Time { + return cfg.BeginAt.Add(cfg.PauseDelay) +} + +// AbsoluteDeadline will calculate the absolute deadline of the entire switchover operation. +func (cfg DeadlineConfig) AbsoluteDeadline() time.Time { + return cfg.BeginAt.Add(cfg.PauseDelay + cfg.MaxPause) +} + +// Serialize returns a textual representation of DeadlineConfig that can be +// transmitted to other nodes. Offset should be time difference between +// the local clock and the central clock (i.e. Postgres). +func (cfg DeadlineConfig) Serialize(offset time.Duration) string { + v := make(url.Values) + v.Set("BeginAt", cfg.BeginAt.Add(-offset).Format(time.RFC3339Nano)) + v.Set("ConsensusTimeout", cfg.ConsensusTimeout.String()) + v.Set("PauseDelay", cfg.PauseDelay.String()) + v.Set("PauseTimeout", cfg.PauseTimeout.String()) + v.Set("MaxPause", cfg.MaxPause.String()) + noPauseAPI := "false" + if cfg.NoPauseAPI { + noPauseAPI = "true" + } + v.Set("NoPauseAPI", noPauseAPI) + return v.Encode() +} + +// ParseDeadlineConfig will parse deadline configuration (given by Serialize) from a string. +// Offset should be the time difference between the local clock and the central clock (i.e. Postgres). 
+func ParseDeadlineConfig(s string, offset time.Duration) (*DeadlineConfig, error) { + v, err := url.ParseQuery(s) + if err != nil { + return nil, err + } + begin, err := time.Parse(time.RFC3339Nano, v.Get("BeginAt")) + if err != nil { + return nil, err + } + p := func(name string) (dur time.Duration) { + if err != nil { + return dur + } + dur, err = time.ParseDuration(v.Get(name)) + return dur + } + + return &DeadlineConfig{ + BeginAt: begin.Add(offset), + ConsensusTimeout: p("ConsensusTimeout"), + PauseDelay: p("PauseDelay"), + PauseTimeout: p("PauseTimeout"), + MaxPause: p("MaxPause"), + NoPauseAPI: v.Get("NoPauseAPI") == "true", + }, err +} diff --git a/switchover/handler.go b/switchover/handler.go new file mode 100644 index 0000000000..17a47be033 --- /dev/null +++ b/switchover/handler.go @@ -0,0 +1,166 @@ +package switchover + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "github.com/target/goalert/app/lifecycle" + "github.com/target/goalert/lock" + "github.com/target/goalert/util/log" + "io" + "sync" + + "github.com/lib/pq" + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" +) + +type Handler struct { + old, new *dbState + id string + dbNextURL string + + sendNotification *sql.Stmt + + nodeStatus map[string]Status + l *pq.Listener + + statusCh chan *Status + controlCh chan *DeadlineConfig + stateCh chan State + + mx sync.Mutex + + state State + app App +} + +type App interface { + Pause(context.Context) error + Resume() + Status() lifecycle.Status +} + +func NewHandler(ctx context.Context, oldC, newC driver.Connector, oldURL, newURL string) (*Handler, error) { + h := &Handler{ + id: uuid.NewV4().String(), + stateCh: make(chan State), + statusCh: make(chan *Status), + controlCh: make(chan *DeadlineConfig), + nodeStatus: make(map[string]Status), + state: StateStarting, + dbNextURL: newURL, + } + var err error + h.old, err = newDBState(ctx, oldC) + if err != nil { + return nil, errors.Wrap(err, "init old 
db") + } + log.Logf(ctx, "DB_URL time offset "+h.old.timeOffset.String()) + h.sendNotification, err = h.old.db.PrepareContext(ctx, `select pg_notify($1, $2)`) + if err != nil { + return nil, errors.Wrap(err, "prepare notify statement") + } + + h.new, err = newDBState(ctx, newC) + if err != nil { + return nil, errors.Wrap(err, "init new db") + } + log.Logf(ctx, "DB_URL_NEXT time offset "+h.new.timeOffset.String()) + diff := h.new.timeOffset - h.old.timeOffset + if diff < 0 { + diff = -diff + } + log.Logf(ctx, "DB time offsets differ by "+diff.String()) + + err = h.initListen(oldURL) + if err != nil { + return nil, errors.Wrap(err, "init DB listener") + } + + go h.loop() + return h, nil +} + +func (h *Handler) Abort() { + h.stateCh <- StateAbort +} + +func (h *Handler) Status() *Status { + h.mx.Lock() + defer h.mx.Unlock() + return h.status() +} +func (h *Handler) status() *Status { + return &Status{ + State: h.state, + NodeID: h.id, + Offset: h.old.timeOffset, + dbNext: dbNext(h.id, h.dbNextURL), + } +} + +func (h *Handler) DB() *sql.DB { + return sql.OpenDB(h) +} + +func (h *Handler) Connect(ctx context.Context) (c driver.Conn, err error) { + c, err = h.old.dbc.Connect(ctx) + if err != nil { + return nil, err + } + _, err = c.(driver.ExecerContext).ExecContext(ctx, `select pg_advisory_lock_shared($1)`, []driver.NamedValue{{Ordinal: 1, Value: int64(lock.GlobalSwitchOver)}}) + if err != nil { + c.Close() + return nil, err + } + + rows, err := c.(driver.QueryerContext). 
+ QueryContext(ctx, `select current_state from switchover_state`, + nil, + ) + if err != nil { + c.Close() + return nil, err + } + + scan := make([]driver.Value, 1) + err = rows.Next(scan) + if err != nil { + c.Close() + return nil, err + } + var state string + switch t := scan[0].(type) { + case string: + state = t + case []byte: + state = string(t) + default: + return nil, fmt.Errorf("expected string for current_state value, got %t", t) + } + + if rows.Next(nil) != io.EOF { + c.Close() + return nil, errors.New("expected single row in switchover_state table") + } + rows.Close() + + switch state { + case "idle", "in_progress": + return c, nil + case "use_next_db": + c.Close() + h.stateCh <- StateComplete + return h.new.dbc.Connect(ctx) + } + + return nil, fmt.Errorf("invalid state %s", state) +} + +func (h *Handler) Driver() driver.Driver { return nil } + +func (h *Handler) SetApp(app App) { + h.app = app +} diff --git a/switchover/mainloop.go b/switchover/mainloop.go new file mode 100644 index 0000000000..6d68d5e57b --- /dev/null +++ b/switchover/mainloop.go @@ -0,0 +1,207 @@ +package switchover + +import ( + "context" + "fmt" + "github.com/target/goalert/app/lifecycle" + "github.com/target/goalert/util/log" + "sort" + "strings" + "time" + + "github.com/pkg/errors" +) + +func (h *Handler) setState(ctx context.Context, newState State) { + switch newState { + case StatePaused, StatePauseWait, StatePausing: + default: + h.app.Resume() + } + if h.state == StateComplete && newState != StateStarting { + return + } + if newState == StateAbort && !h.state.IsActive() { + // already aborted + return + } + if newState == h.state { + return + } + + h.state = newState + _, err := h.sendNotification.ExecContext(ctx, StateChannel, h.status().serialize()) + if err != nil { + log.Log(ctx, err) + } +} +func (s State) oneOf(state []State) bool { + for _, st := range state { + if st == s { + return true + } + } + return false +} +func (h *Handler) allNodes(state ...State) 
bool { + for _, s := range h.nodeStatus { + if !s.State.oneOf(state) { + return false + } + } + return true +} +func (h *Handler) updateNodeStatus(ctx context.Context, s *Status) bool { + if !s.MatchDBNext(h.dbNextURL) { + log.Logf(ctx, "Switch-Over Abort: NodeID="+s.NodeID+" has mismatched db-next-url") + h.setState(ctx, StateAbort) + } + + oldStatus, ok := h.nodeStatus[s.NodeID] + if oldStatus.State == s.State { + return false + } + h.nodeStatus[s.NodeID] = *s + if !ok { + log.Logf(ctx, "Switch-Over Join: NodeID="+s.NodeID) + } + + cnt := len(h.nodeStatus) + + statCount := make(map[State]int) + for _, s := range h.nodeStatus { + statCount[s.State]++ + } + stats := make([]string, 0, len(statCount)) + for state, n := range statCount { + stats = append(stats, fmt.Sprintf("%s %d/%d", state, n, cnt)) + } + sort.Strings(stats) + log.Logf(ctx, "Switch-Over State: %s", strings.Join(stats, ", ")) + if !ok && h.state != StateStarting && h.state != StateReady { + h.setState(ctx, StateAbort) + } + return true +} + +func (h *Handler) loop() { + ctx := context.Background() + ctx = log.WithField(ctx, "NodeID", h.id) + statusUpdateT := time.NewTicker(3 * time.Second) + defer statusUpdateT.Stop() + var cfg DeadlineConfig + deadline := time.NewTimer(0) + deadline.Stop() + var cancel func() + pauseDone := make(chan struct{}) + + abort := func() { + if cancel != nil { + cancel() + } + deadline.Stop() + h.setState(ctx, StateAbort) + } + var lastDeadline string + reset := func(name string, t time.Time) { + deadline.Stop() + deadline = time.NewTimer(time.Until(t)) + lastDeadline = name + } + + for { + select { + case <-pauseDone: + pauseDone = make(chan struct{}) + if h.state == StatePausing { + reset("Switch-Over", cfg.AbsoluteDeadline()) + h.setState(ctx, StatePaused) + } + case <-deadline.C: + switch h.state { + case StateComplete: + // already done + continue + case StateArmWait: + // start the pause + pauseDone = make(chan struct{}) + pauseCtx, pauseCancel := 
context.WithDeadline(ctx, cfg.PauseDeadline()) + pauseCtx = context.WithValue(pauseCtx, ctxValueDeadlines, cfg) + cancel = pauseCancel + reset("Pause", cfg.PauseDeadline()) + go func() { + err := h.app.Pause(pauseCtx) + if err != nil { + log.Log(pauseCtx, errors.Wrap(err, "pause application")) + abort() + return + } + close(pauseDone) + }() + h.setState(ctx, StatePausing) + default: + log.Logf(ctx, "Switch-Over: Deadline reached (%s), aborting", lastDeadline) + abort() + } + continue + case d := <-h.controlCh: + if h.state != StateReady { + log.Logf(ctx, "Switch-Over: Control received but not ready, aborting") + abort() + continue + } + log.Logf(ctx, "Switch-Over: Control BeginAt=%s", d.BeginAt.Format(time.RFC1123)) + log.Logf(ctx, "Switch-Over: Control ConsensusDeadline=%s", d.ConsensusDeadline().Format(time.RFC1123)) + log.Logf(ctx, "Switch-Over: Control PauseAt=%s", d.PauseAt().Format(time.RFC1123)) + log.Logf(ctx, "Switch-Over: Control PauseDeadline=%s", d.PauseDeadline().Format(time.RFC1123)) + log.Logf(ctx, "Switch-Over: Control AbsoluteDeadline=%s", d.AbsoluteDeadline().Format(time.RFC1123)) + log.Logf(ctx, "Switch-Over: PAUSE BEGINS IN %s", time.Until(d.PauseAt()).String()) + + cfg = *d + reset("Consensus", cfg.ConsensusDeadline()) + h.setState(ctx, StateArmed) + case <-statusUpdateT.C: + if h.state == StateStarting && h.app.Status() == lifecycle.StatusReady { + h.setState(ctx, StateReady) + continue + } + + _, err := h.sendNotification.ExecContext(ctx, StateChannel, h.status().serialize()) + if err != nil { + log.Log(ctx, err) + } + case state := <-h.stateCh: + switch state { + case StateAbort: + log.Logf(ctx, "Switch-Over: Got abort signal.") + abort() + continue + case StateStarting: + log.Logf(ctx, "Switch-Over: Got reset signal.") + abort() //reset + h.nodeStatus = make(map[string]Status) + default: + log.Logf(ctx, "Switch-Over: Got signal '%s'.", state) + } + + h.setState(ctx, state) + case stat := <-h.statusCh: + if !h.updateNodeStatus(ctx, stat) { + 
continue + } + switch h.state { + case StateArmed: + if h.allNodes(StateArmed, StateArmWait) { + log.Logf(ctx, "Switch-Over: Consensus reached after %s", time.Since(cfg.BeginAt).String()) + reset("PauseAt", cfg.PauseAt()) + h.setState(ctx, StateArmWait) + } + case StatePaused: + if h.allNodes(StatePaused, StatePauseWait) { + log.Logf(ctx, "Switch-Over: World paused after %s", time.Since(cfg.PauseAt()).String()) + h.setState(ctx, StatePauseWait) + } + } + } + } +} diff --git a/switchover/notify.go b/switchover/notify.go new file mode 100644 index 0000000000..3699d1892e --- /dev/null +++ b/switchover/notify.go @@ -0,0 +1,105 @@ +package switchover + +import ( + "context" + "github.com/target/goalert/util/log" + "net/url" + "time" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +const ( + StateChannel = "goalert_switchover_state" + ControlChannel = "goalert_switchover_control" +) + +func (h *Handler) initListen(name string) error { + u, err := url.Parse(name) + if err != nil { + return errors.Wrap(err, "parse db URL") + } + q := u.Query() + q.Set("application_name", "GoAlert Switch-Over Listener") + u.RawQuery = q.Encode() + name = u.String() + + h.l = pq.NewListener(name, 0, time.Second, h.listenEvent) + + err = h.l.Listen(StateChannel) + if err != nil { + h.l.Close() + return err + } + err = h.l.Listen(ControlChannel) + if err != nil { + h.l.Close() + return err + } + go h.listenLoop() + return nil +} + +func (h *Handler) pushState(s State) { h.stateCh <- s } + +func (h *Handler) listenLoop() { + ctx := context.Background() + ctx = log.WithField(ctx, "NodeID", h.id) + + for n := range h.l.NotificationChannel() { + if n == nil { + // nil can be sent, ignore + continue + } + switch n.Channel { + case StateChannel: + s, err := ParseStatus(n.Extra) + if err != nil { + log.Log(ctx, errors.Wrap(err, "parse Status string")) + continue + } + if s.State == StateAbort { + go h.pushState(StateAbort) + } + h.statusCh <- s + case ControlChannel: + switch n.Extra 
{ + case "done": + go h.pushState(StateComplete) + continue + case "abort": + go h.pushState(StateAbort) + continue + case "reset": + go h.pushState(StateStarting) + continue + } + + d, err := ParseDeadlineConfig(n.Extra, h.old.timeOffset) + if err != nil { + log.Log(ctx, errors.Wrap(err, "parse Deadlines string")) + continue + } + h.controlCh <- d + } + } +} +func (h *Handler) listenEvent(ev pq.ListenerEventType, err error) { + var event string + switch ev { + case pq.ListenerEventConnected: + event = "connected" + case pq.ListenerEventConnectionAttemptFailed: + event = "connection attempt failed" + case pq.ListenerEventDisconnected: + event = "disconnected" + case pq.ListenerEventReconnected: + event = "reconnected" + } + if err != nil { + log.Log(context.Background(), errors.Wrapf(err, "pq listen event '%s'", event)) + } else { + log.Logf(context.Background(), "PQ Listen Event: %s", event) + } +} diff --git a/switchover/state.go b/switchover/state.go new file mode 100644 index 0000000000..23ee0a0497 --- /dev/null +++ b/switchover/state.go @@ -0,0 +1,27 @@ +package switchover + +// State indicates the current state of a node. +type State string + +// Possible states +const ( + StateStarting = State("starting") + StateReady = State("ready") + StateArmed = State("armed") + StateArmWait = State("armed-waiting") + StatePausing = State("pausing") + StatePaused = State("paused") + StatePauseWait = State("paused-waiting") + StateComplete = State("complete") + StateAbort = State("aborted") +) + +// IsActive will return true if the state represents +// an on-going change-over event. 
+func (s State) IsActive() bool { + switch s { + case StateArmed, StateArmWait, StatePausing, StatePaused, StatePauseWait: + return true + } + return false +} diff --git a/switchover/status.go b/switchover/status.go new file mode 100644 index 0000000000..1636df11aa --- /dev/null +++ b/switchover/status.go @@ -0,0 +1,81 @@ +package switchover + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "net/url" + "strconv" + "time" +) + +// Status represents the status of an individual node. +type Status struct { + NodeID string + State State + Offset time.Duration + At time.Time + + ActiveRequests int + + dbNext []byte +} + +func ParseStatus(str string) (*Status, error) { + v, err := url.ParseQuery(str) + if err != nil { + return nil, err + } + dbNext, err := base64.StdEncoding.DecodeString(v.Get("DBNext")) + if err != nil { + return nil, err + } + reqs, err := strconv.Atoi(v.Get("ActiveRequests")) + if err != nil { + return nil, err + } + + s := &Status{ + NodeID: v.Get("NodeID"), + State: State(v.Get("State")), + At: time.Now(), + dbNext: dbNext, + + ActiveRequests: reqs, + } + + s.Offset, err = time.ParseDuration(v.Get("Offset")) + if err != nil { + return nil, err + } + return s, nil +} + +func stripAppName(urlStr string) string { + u, err := url.Parse(urlStr) + if err != nil { + panic(err) + } + q := u.Query() + q.Del("application_name") + u.RawQuery = q.Encode() + return u.String() +} + +// MatchDBNext will return true if the Status indicates a +// matching db-next-url. 
+func (s Status) MatchDBNext(dbNextURL string) bool { + return hmac.Equal(s.dbNext, dbNext(s.NodeID, dbNextURL)) +} +func dbNext(id, url string) []byte { + return hmac.New(sha256.New, []byte(stripAppName(url))).Sum([]byte(id)) +} +func (s Status) serialize() string { + v := make(url.Values) + v.Set("NodeID", s.NodeID) + v.Set("State", string(s.State)) + v.Set("Offset", s.Offset.String()) + v.Set("DBNext", base64.StdEncoding.EncodeToString(s.dbNext)) + v.Set("ActiveRequests", strconv.Itoa(s.ActiveRequests)) + return v.Encode() +} diff --git a/timezone/search.go b/timezone/search.go new file mode 100644 index 0000000000..a0ad32c929 --- /dev/null +++ b/timezone/search.go @@ -0,0 +1,130 @@ +package timezone + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/validation/validate" + "strconv" + "text/template" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// SearchOptions allow filtering and paginating the list of timezones. +type SearchOptions struct { + Search string `json:"s,omitempty"` + After SearchCursor `json:"a,omitempty"` + + // Omit specifies a list of timezone names to exclude from the results. + Omit []string `json:"o,omitempty"` + + Limit int `json:"-"` +} + +// SearchCursor is used to indicate a position in a paginated list. 
+type SearchCursor struct { + Name string `json:"n,omitempty"` +} + +var searchTemplate = template.Must(template.New("search").Parse(` + SELECT + name + FROM pg_timezone_names tz + WHERE true + {{if .Omit}} + AND not tz.name = any(:omit) + {{end}} + {{if .SearchStr}} + AND (tz.name ILIKE :search) + {{end}} + {{if .After.Name}} + AND lower(tz.name) > lower(:afterName) + {{end}} + ORDER BY lower(tz.name) + LIMIT {{.Limit}} +`)) + +type renderData SearchOptions + +func (opts renderData) SearchStr() string { + if opts.Search == "" { + return "" + } + + return "%" + search.Escape(opts.Search) + "%" +} + +func (opts renderData) Normalize() (*renderData, error) { + if opts.Limit == 0 { + opts.Limit = search.DefaultMaxResults + } + + err := validate.Many( + validate.Text("Search", opts.Search, 0, search.MaxQueryLen), + validate.Range("Limit", opts.Limit, 0, search.MaxResults), + validate.Range("Omit", len(opts.Omit), 0, 50), + ) + if opts.After.Name != "" { + err = validate.Many(err, validate.Text("After.Name", opts.After.Name, 1, 255)) + } + if err != nil { + return nil, err + } + for i, name := range opts.Omit { + err = validate.Many(err, + validate.Text("Omit["+strconv.Itoa(i)+"]", name, 1, 255), + ) + } + + return &opts, err +} + +func (opts renderData) QueryArgs() []sql.NamedArg { + return []sql.NamedArg{ + sql.Named("search", opts.SearchStr()), + sql.Named("afterName", opts.After.Name), + sql.Named("omit", pq.StringArray(opts.Omit)), + } +} + +func (store *Store) Search(ctx context.Context, opts *SearchOptions) ([]string, error) { + err := permission.LimitCheckAny(ctx, permission.User) + if err != nil { + return nil, err + } + if opts == nil { + opts = &SearchOptions{} + } + data, err := (*renderData)(opts).Normalize() + if err != nil { + return nil, err + } + query, args, err := search.RenderQuery(ctx, searchTemplate, data) + if err != nil { + return nil, errors.Wrap(err, "render query") + } + + rows, err := store.db.QueryContext(ctx, query, args...) 
+ if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var result []string + var name string + for rows.Next() { + err = rows.Scan(&name) + if err != nil { + return nil, err + } + result = append(result, name) + } + + return result, nil +} diff --git a/timezone/store.go b/timezone/store.go new file mode 100644 index 0000000000..8f90b29867 --- /dev/null +++ b/timezone/store.go @@ -0,0 +1,16 @@ +package timezone + +import ( + "context" + "database/sql" +) + +type Store struct { + db *sql.DB +} + +func NewStore(ctx context.Context, db *sql.DB) *Store { + return &Store{ + db: db, + } +} diff --git a/user/authsubject.go b/user/authsubject.go new file mode 100644 index 0000000000..40c3030245 --- /dev/null +++ b/user/authsubject.go @@ -0,0 +1,30 @@ +package user + +import ( + "github.com/target/goalert/validation/validate" +) + +// An AuthSubject contains information about the auth provider and subject ID for a particular user. +type AuthSubject struct { + // ProviderID is the ID for the provider of the user. + ProviderID string + + // SubjectID is the ID for the subject of the user. + SubjectID string + + // UserID is the ID of the user. + UserID string +} + +// Normalize will validate and produce a normalized AuthSubject struct. +func (a AuthSubject) Normalize() (*AuthSubject, error) { + err := validate.Many( + validate.SubjectID("SubjectID", a.SubjectID), + validate.SubjectID("ProviderID", a.ProviderID), + validate.UUID("UserID", a.UserID), + ) + if err != nil { + return nil, err + } + return &a, nil +} diff --git a/user/contactmethod/contactmethod.go b/user/contactmethod/contactmethod.go new file mode 100644 index 0000000000..6ad84f12b5 --- /dev/null +++ b/user/contactmethod/contactmethod.go @@ -0,0 +1,45 @@ +package contactmethod + +import ( + "github.com/target/goalert/validation/validate" + + uuid "github.com/satori/go.uuid" +) + +// ContactMethod stores the information for contacting a user. 
+type ContactMethod struct { + ID string `json:"id"` + Name string `json:"name"` + Type Type `json:"type"` + Value string `json:"value"` + Disabled bool `json:"disabled"` + UserID string `json:"-"` +} + +// Normalize will validate and 'normalize' the ContactMethod -- such as making email lower-case +// and setting carrier to "" (for non-phone types). +func (c ContactMethod) Normalize() (*ContactMethod, error) { + if c.ID == "" { + c.ID = uuid.NewV4().String() + } + err := validate.Many( + validate.UUID("ID", c.ID), + validate.IDName("Name", c.Name), + validate.OneOf("Type", c.Type, TypeSMS, TypeVoice, TypeEmail, TypePush), + ) + + switch c.Type { + case TypeSMS, TypeVoice: + err = validate.Many(err, validate.Phone("Value", c.Value)) + case TypeEmail: + err = validate.Many(err, validate.Email("Value", c.Value)) + case TypePush: + c.Value = "" + } + + if err != nil { + return nil, err + } + + return &c, nil +} diff --git a/user/contactmethod/contactmethod_test.go b/user/contactmethod/contactmethod_test.go new file mode 100644 index 0000000000..ae4c0c2ef4 --- /dev/null +++ b/user/contactmethod/contactmethod_test.go @@ -0,0 +1,36 @@ +package contactmethod + +import ( + "testing" +) + +func TestContactMethod_Normalize(t *testing.T) { + test := func(valid bool, cm ContactMethod) { + name := "valid" + if !valid { + name = "invalid" + } + t.Run(name, func(t *testing.T) { + t.Logf("%+v", cm) + _, err := cm.Normalize() + if valid && err != nil { + t.Errorf("got %v; want nil", err) + } else if !valid && err == nil { + t.Errorf("got nil err; want non-nil") + } + }) + } + + valid := []ContactMethod{ + {Name: "Iphone", Type: TypeSMS, Value: "+15515108117"}, + } + invalid := []ContactMethod{ + {Name: "abcd", Type: TypeSMS, Value: "+15555555555"}, + } + for _, cm := range valid { + test(true, cm) + } + for _, cm := range invalid { + test(false, cm) + } +} diff --git a/user/contactmethod/store.go b/user/contactmethod/store.go new file mode 100644 index 0000000000..b486ca70eb --- 
/dev/null +++ b/user/contactmethod/store.go @@ -0,0 +1,359 @@ +package contactmethod + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation" + "github.com/target/goalert/validation/validate" + + "github.com/lib/pq" +) + +// Store allows the lookup and management of ContactMethods. +type Store interface { + Insert(context.Context, *ContactMethod) (*ContactMethod, error) + CreateTx(context.Context, *sql.Tx, *ContactMethod) (*ContactMethod, error) + Update(context.Context, *ContactMethod) error + UpdateTx(context.Context, *sql.Tx, *ContactMethod) error + Delete(ctx context.Context, id string) error + FindOne(ctx context.Context, id string) (*ContactMethod, error) + FindOneTx(ctx context.Context, tx *sql.Tx, id string) (*ContactMethod, error) + FindMany(ctx context.Context, ids []string) ([]ContactMethod, error) + FindAll(ctx context.Context, userID string) ([]ContactMethod, error) + DeleteTx(ctx context.Context, tx *sql.Tx, id ...string) error + DisableByValue(context.Context, Type, string) error +} + +// DB implements the ContactMethodStore against a *sql.DB backend. +type DB struct { + db *sql.DB + + insert *sql.Stmt + update *sql.Stmt + delete *sql.Stmt + findOne *sql.Stmt + findOneUpd *sql.Stmt + findMany *sql.Stmt + findAll *sql.Stmt + lookupUserID *sql.Stmt + disable *sql.Stmt + disablePhone *sql.Stmt + getType *sql.Stmt +} + +// NewDB will create a DB backend from a sql.DB. An error will be returned if statements fail to prepare. 
+func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + return &DB{ + db: db, + disable: p.P(` + UPDATE user_contact_methods + SET disabled = true + WHERE type = $1 + AND value = $2 + `), + disablePhone: p.P(` + UPDATE user_contact_methods + SET disabled = true + WHERE (type = 'SMS' or type = 'VOICE') + AND value = $1 + `), + lookupUserID: p.P(` + SELECT DISTINCT user_id + FROM user_contact_methods + WHERE id = any($1) + `), + getType: p.P(` + SELECT type + FROM user_contact_methods + WHERE id = $1 + `), + insert: p.P(` + INSERT INTO user_contact_methods (id,name,type,value,disabled,user_id) + VALUES ($1,$2,$3,$4,$5,$6) + `), + findOne: p.P(` + SELECT id,name,type,value,disabled,user_id + FROM user_contact_methods + WHERE id = $1 + `), + findOneUpd: p.P(` + SELECT id,name,type,value,disabled,user_id + FROM user_contact_methods + WHERE id = $1 + FOR UPDATE + `), + findMany: p.P(` + SELECT id,name,type,value,disabled,user_id + FROM user_contact_methods + WHERE id = any($1) + `), + findAll: p.P(` + SELECT id,name,type,value,disabled,user_id + FROM user_contact_methods + WHERE user_id = $1 + `), + update: p.P(` + UPDATE user_contact_methods + SET name = $2, value = $3, disabled = $4 + WHERE id = $1 + `), + delete: p.P(` + DELETE FROM user_contact_methods + WHERE id = any($1) + `), + }, p.Err +} + +func (db *DB) DisableByValue(ctx context.Context, t Type, v string) error { + c := ContactMethod{Name: "Disable", Type: t, Value: v} + n, err := c.Normalize() + if err != nil { + return err + } + err = permission.LimitCheckAny(ctx, permission.System) + if err != nil { + return err + } + switch t { + case TypeSMS, TypeVoice: + _, err = db.disablePhone.ExecContext(ctx, n.Value) + default: + _, err = db.disable.ExecContext(ctx, n.Type, n.Value) + } + return err +} + +// Insert implements the ContactMethodStore interface by inserting the new ContactMethod into the database. +// A new ID is always created. 
+func (db *DB) Insert(ctx context.Context, c *ContactMethod) (*ContactMethod, error) { + return db.CreateTx(ctx, nil, c) +} + +// CreateTx implements the ContactMethodStore interface by inserting the new ContactMethod into the database. +// A new ID is always created. +func (db *DB) CreateTx(ctx context.Context, tx *sql.Tx, c *ContactMethod) (*ContactMethod, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.Admin, permission.MatchUser(c.UserID)) + if err != nil { + return nil, err + } + + n, err := c.Normalize() + if err != nil { + return nil, err + } + + _, err = wrapTx(ctx, tx, db.insert).ExecContext(ctx, n.ID, n.Name, n.Type, n.Value, n.Disabled, n.UserID) + if err != nil { + return nil, err + } + + return n, nil +} + +// Delete implements the ContactMethodStore interface. +func (db *DB) Delete(ctx context.Context, id string) error { + return db.DeleteTx(ctx, nil, id) +} + +func wrapTx(ctx context.Context, tx *sql.Tx, stmt *sql.Stmt) *sql.Stmt { + if tx == nil { + return stmt + } + + return tx.StmtContext(ctx, stmt) +} + +// DeleteTx implements the ContactMethodStore interface. +func (db *DB) DeleteTx(ctx context.Context, tx *sql.Tx, ids ...string) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + if len(ids) == 0 { + return nil + } + + err = validate.ManyUUID("ContactMethodID", ids, 50) + if err != nil { + return err + } + + if permission.Admin(ctx) { + _, err = wrapTx(ctx, tx, db.delete).ExecContext(ctx, pq.StringArray(ids)) + return err + } + + rows, err := wrapTx(ctx, tx, db.lookupUserID).QueryContext(ctx, pq.StringArray(ids)) + if err != nil { + return err + } + defer rows.Close() + + var checks []permission.Checker + var userID string + for rows.Next() { + err = rows.Scan(&userID) + if err != nil { + return err + } + checks = append(checks, permission.MatchUser(userID)) + } + + err = permission.LimitCheckAny(ctx, checks...) 
+ if err != nil { + return err + } + _, err = wrapTx(ctx, tx, db.delete).ExecContext(ctx, pq.StringArray(ids)) + return err +} + +// FindOneTx implements the ContactMethodStore interface. +func (db *DB) FindOneTx(ctx context.Context, tx *sql.Tx, id string) (*ContactMethod, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + err = validate.UUID("ContactMethodID", id) + if err != nil { + return nil, err + } + + var c ContactMethod + row := wrapTx(ctx, tx, db.findOneUpd).QueryRowContext(ctx, id) + err = row.Scan(&c.ID, &c.Name, &c.Type, &c.Value, &c.Disabled, &c.UserID) + if err != nil { + return nil, err + } + return &c, nil +} + +// FindOne implements the ContactMethodStore interface. +func (db *DB) FindOne(ctx context.Context, id string) (*ContactMethod, error) { + err := validate.UUID("ContactMethodID", id) + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + var c ContactMethod + row := db.findOne.QueryRowContext(ctx, id) + err = row.Scan(&c.ID, &c.Name, &c.Type, &c.Value, &c.Disabled, &c.UserID) + if err != nil { + return nil, err + } + return &c, nil +} + +// Update implements the ContactMethodStore interface. +func (db *DB) Update(ctx context.Context, c *ContactMethod) error { + return db.UpdateTx(ctx, nil, c) +} + +// UpdateTx implements the ContactMethodStore interface. 
+func (db *DB) UpdateTx(ctx context.Context, tx *sql.Tx, c *ContactMethod) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + n, err := c.Normalize() + if err != nil { + return err + } + + row := wrapTx(ctx, tx, db.getType).QueryRowContext(ctx, n.ID) + var cmType Type + err = row.Scan(&cmType) + if err != nil { + return err + } + if n.Type != cmType { + return validation.NewFieldError("Type", "cannot update type of contact method") + } + + if permission.Admin(ctx) { + _, err = wrapTx(ctx, tx, db.update).ExecContext(ctx, n.ID, n.Name, n.Value, n.Disabled) + return err + } + + var userID string + + row = wrapTx(ctx, tx, db.lookupUserID).QueryRowContext(ctx, pq.StringArray{n.ID}) + err = row.Scan(&userID) + if err != nil { + return err + } + + n.UserID = userID + + err = permission.LimitCheckAny(ctx, permission.MatchUser(userID)) + if err != nil { + return err + } + + _, err = wrapTx(ctx, tx, db.update).ExecContext(ctx, n.ID, n.Name, n.Value, n.Disabled) + return err +} + +// FindMany will fetch all contact methods matching the given ids. +func (db *DB) FindMany(ctx context.Context, ids []string) ([]ContactMethod, error) { + err := validate.ManyUUID("ContactMethodID", ids, 50) + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.User) + if err != nil { + return nil, err + } + + rows, err := db.findMany.QueryContext(ctx, pq.StringArray(ids)) + if err != nil { + return nil, err + } + defer rows.Close() + + return scanAll(rows) +} + +func scanAll(rows *sql.Rows) ([]ContactMethod, error) { + var contactMethods []ContactMethod + for rows.Next() { + var c ContactMethod + err := rows.Scan(&c.ID, &c.Name, &c.Type, &c.Value, &c.Disabled, &c.UserID) + if err != nil { + return nil, err + } + contactMethods = append(contactMethods, c) + } + return contactMethods, nil +} + +// FindAll implements the ContactMethodStore interface. 
+func (db *DB) FindAll(ctx context.Context, userID string) ([]ContactMethod, error) {
+	err := validate.UUID("UserID", userID)
+	if err != nil {
+		return nil, err
+	}
+
+	err = permission.LimitCheckAny(ctx, permission.All)
+	if err != nil {
+		return nil, err
+	}
+
+	rows, err := db.findAll.QueryContext(ctx, userID)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	return scanAll(rows)
+}
diff --git a/user/contactmethod/type.go b/user/contactmethod/type.go
new file mode 100644
index 0000000000..1929256bea
--- /dev/null
+++ b/user/contactmethod/type.go
@@ -0,0 +1,54 @@
+package contactmethod
+
+import (
+	"fmt"
+	"github.com/target/goalert/notification"
+)
+
+// Type specifies the medium a ContactMethod is notified.
+type Type string
+
+// ContactMethod types
+const (
+	TypeVoice Type = "VOICE"
+	TypeSMS   Type = "SMS"
+	TypeEmail Type = "EMAIL"
+	TypePush  Type = "PUSH"
+)
+
+// TypeFromDestType will return the Type associated with a
+// notification.DestType.
+func TypeFromDestType(t notification.DestType) Type {
+	switch t {
+	case notification.DestTypeSMS:
+		return TypeSMS
+	case notification.DestTypeVoice:
+		return TypeVoice
+	}
+
+	return ""
+}
+
+func (t Type) DestType() notification.DestType {
+	switch t {
+	case TypeSMS:
+		return notification.DestTypeSMS
+	case TypeVoice:
+		return notification.DestTypeVoice
+	}
+	return 0
+}
+
+// Scan handles reading a Type from the DB format
+func (r *Type) Scan(value interface{}) error {
+	switch t := value.(type) {
+	case []byte:
+		*r = Type(t)
+	case string:
+		*r = Type(t)
+	default:
+		return fmt.Errorf("could not process unknown type for contact method type %T", t) // was "…for role %T": copy-paste from the role Scanner; this scans a contact-method Type
+	}
+
+	return nil
+}
diff --git a/user/favorite/store.go b/user/favorite/store.go
new file mode 100644
index 0000000000..99fe977eef
--- /dev/null
+++ b/user/favorite/store.go
@@ -0,0 +1,157 @@
+package favorite
+
+import (
+	"context"
+	"database/sql"
+	"github.com/target/goalert/assignment"
+	"github.com/target/goalert/permission"
+
"github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + + "github.com/pkg/errors" +) + +// Store allows the lookup and management of Favorites. +type Store interface { + // Set will set a target as a favorite for the given userID. It is safe to call multiple times. + Set(ctx context.Context, userID string, tgt assignment.Target) error + // Unset will unset a target as a favorite for the given userID. It is safe to call multiple times. + Unset(ctx context.Context, userID string, tgt assignment.Target) error + + FindAll(ctx context.Context, userID string, filter []assignment.TargetType) ([]assignment.Target, error) +} + +// DB implements the Store interface using a postgres database. +type DB struct { + db *sql.DB + + insert *sql.Stmt + delete *sql.Stmt + findAll *sql.Stmt +} + +// NewDB will create a DB backend from a sql.DB. An error will be returned if statements fail to prepare. +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + return &DB{ + db: db, + insert: p.P(` + INSERT INTO user_favorites (user_id, tgt_service_id) + VALUES ($1, $2) + ON CONFLICT DO NOTHING + `), + delete: p.P(` + DELETE FROM user_favorites + WHERE user_id = $1 and + tgt_service_id = $2 + `), + findAll: p.P(` + SELECT tgt_service_id + FROM user_favorites + WHERE user_id = $1 + AND tgt_service_id NOTNULL = $2 + `), + }, p.Err +} + +// Set will store the target as a favorite of the given user. Must be authorized as System or the same user. 
+func (db *DB) Set(ctx context.Context, userID string, tgt assignment.Target) error { + err := permission.LimitCheckAny(ctx, permission.System, permission.MatchUser(userID)) + if err != nil { + return err + } + err = validate.Many( + validate.UUID("TargetID", tgt.TargetID()), + validate.UUID("UserID", userID), + validate.OneOf("TargetType", tgt.TargetType(), assignment.TargetTypeService), + ) + if err != nil { + return err + } + + _, err = db.insert.ExecContext(ctx, userID, tgt.TargetID()) + if err != nil { + return errors.Wrap(err, "set favorite") + } + + return nil +} + +// Unset will remove the target as a favorite of the given user. Must be authorized as System or the same user. +func (db *DB) Unset(ctx context.Context, userID string, tgt assignment.Target) error { + err := permission.LimitCheckAny(ctx, permission.System, permission.MatchUser(userID)) + if err != nil { + return err + } + + err = validate.Many( + validate.UUID("TargetID", tgt.TargetID()), + validate.UUID("UserID", userID), + validate.OneOf("TargetType", tgt.TargetType(), assignment.TargetTypeService), + ) + if err != nil { + return err + } + + _, err = db.delete.ExecContext(ctx, userID, tgt.TargetID()) + if err == sql.ErrNoRows { + // ignoring since it is safe to unset favorite (with retries) + err = nil + } + if err != nil { + return err + } + + return nil +} + +func (db *DB) FindAll(ctx context.Context, userID string, filter []assignment.TargetType) ([]assignment.Target, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.MatchUser(userID)) + if err != nil { + return nil, err + } + + err = validate.Many( + validate.UUID("UserID", userID), + validate.Range("Filter", len(filter), 0, 50), + ) + if err != nil { + return nil, err + } + + var allowServices bool + if len(filter) == 0 { + allowServices = true + } else { + for _, f := range filter { + switch f { + case assignment.TargetTypeService: + allowServices = true + } + } + } + + rows, err := 
db.findAll.QueryContext(ctx, userID, allowServices) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var targets []assignment.Target + + for rows.Next() { + var svc sql.NullString + err = rows.Scan(&svc) + if err != nil { + return nil, err + } + switch { + case svc.Valid: + targets = append(targets, assignment.ServiceTarget(svc.String)) + } + } + return targets, nil +} diff --git a/user/notificationrule/notificationrule.go b/user/notificationrule/notificationrule.go new file mode 100644 index 0000000000..356b28fe9d --- /dev/null +++ b/user/notificationrule/notificationrule.go @@ -0,0 +1,33 @@ +package notificationrule + +import ( + "github.com/target/goalert/validation/validate" +) + +type NotificationRule struct { + ID string `json:"id"` + UserID string `json:"-"` + DelayMinutes int `json:"delay"` + ContactMethodID string `json:"contact_method_id"` +} + +func validateDelay(d int) error { + return validate.Range("DelayMinutes", d, 0, 9000) +} + +func (n NotificationRule) Normalize(update bool) (*NotificationRule, error) { + err := validateDelay(n.DelayMinutes) + + if !update { + err = validate.Many( + err, + validate.UUID("ContactMethodID", n.ContactMethodID), + validate.UUID("UserID", n.UserID), + ) + } + if err != nil { + return nil, err + } + + return &n, nil +} diff --git a/user/notificationrule/notificationrule_test.go b/user/notificationrule/notificationrule_test.go new file mode 100644 index 0000000000..8687002906 --- /dev/null +++ b/user/notificationrule/notificationrule_test.go @@ -0,0 +1,36 @@ +package notificationrule + +import ( + "testing" +) + +func TestNotificationRule_Normalize(t *testing.T) { + test := func(valid bool, nr NotificationRule) { + name := "valid" + if !valid { + name = "invalid" + } + t.Run(name, func(t *testing.T) { + t.Logf("%+v", nr) + _, err := nr.Normalize(false) + if valid && err != nil { + t.Errorf("got %v; want nil", err) + } else if !valid && err == nil { + 
t.Errorf("got nil err; want non-nil") + } + }) + } + + valid := []NotificationRule{ + {DelayMinutes: 5, ContactMethodID: "ececacc0-4764-012d-7bfb-002500d5dece", UserID: "bcefacc0-4764-012d-7bfb-002500d5decb"}, + } + invalid := []NotificationRule{ + {}, + } + for _, nr := range valid { + test(true, nr) + } + for _, nr := range invalid { + test(false, nr) + } +} diff --git a/user/notificationrule/store.go b/user/notificationrule/store.go new file mode 100644 index 0000000000..3a43dc354e --- /dev/null +++ b/user/notificationrule/store.go @@ -0,0 +1,258 @@ +package notificationrule + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + + "github.com/target/goalert/validation/validate" + + "github.com/lib/pq" + uuid "github.com/satori/go.uuid" +) + +// Store allows the lookup and management of NotificationRules. +type Store interface { + Insert(context.Context, *NotificationRule) (*NotificationRule, error) + UpdateDelay(ctx context.Context, id string, delay int) error + Delete(ctx context.Context, id string) error + DeleteTx(ctx context.Context, tx *sql.Tx, ids ...string) error + CreateTx(context.Context, *sql.Tx, *NotificationRule) (*NotificationRule, error) + FindOne(ctx context.Context, id string) (*NotificationRule, error) + FindAll(ctx context.Context, userID string) ([]NotificationRule, error) + + WrapTx(*sql.Tx) Store + DoTx(func(Store) error) error +} + +// DB implements the NotificationRuleStore against a *sql.DB backend. +type DB struct { + db *sql.DB + + insert *sql.Stmt + update *sql.Stmt + delete *sql.Stmt + findOne *sql.Stmt + findAll *sql.Stmt + lookupUserID *sql.Stmt +} + +// NewDB will create a DB backend from a sql.DB. An error will be returned if statements fail to prepare. 
+func NewDB(ctx context.Context, db *sql.DB) (*DB, error) {
+	prep := &util.Prepare{DB: db, Ctx: ctx}
+	p := prep.P
+	s := &DB{db: db}
+
+	s.insert = p("INSERT INTO user_notification_rules (id,user_id,delay_minutes,contact_method_id) VALUES ($1,$2,$3,$4)")
+	s.findOne = p("SELECT id,user_id,delay_minutes,contact_method_id FROM user_notification_rules WHERE id = $1 LIMIT 1")
+	s.findAll = p("SELECT id,user_id,delay_minutes,contact_method_id FROM user_notification_rules WHERE user_id = $1")
+	s.update = p("UPDATE user_notification_rules SET delay_minutes = $2 WHERE id = $1")
+	s.delete = p("DELETE FROM user_notification_rules WHERE id = any($1)")
+	s.lookupUserID = p("SELECT user_id FROM user_notification_rules WHERE id = any($1)")
+
+	return s, prep.Err
+}
+
+// WrapTx will wrap the NotificationRuleDB for use within the given transaction.
+func (db *DB) WrapTx(tx *sql.Tx) Store {
+	return &DB{
+		insert:  tx.Stmt(db.insert),
+		findOne: tx.Stmt(db.findOne),
+		findAll: tx.Stmt(db.findAll),
+		update:  tx.Stmt(db.update),
+		delete:  tx.Stmt(db.delete), lookupUserID: tx.Stmt(db.lookupUserID), // lookupUserID was omitted: DeleteTx/UpdateDelay dereference it for non-admin callers, panicking on a wrapped Store
+	}
+}
+
+// DoTx will perform a transaction with the NotificationRuleStore.
+func (d *DB) DoTx(f func(Store) error) error {
+	tx, err := d.db.Begin()
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	err = f(d.WrapTx(tx))
+	if err != nil {
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// Insert implements the NotificationRuleStore interface by inserting the new NotificationRule into the database.
+// A new ID is always created.
+func (db *DB) Insert(ctx context.Context, n *NotificationRule) (*NotificationRule, error) {
+	return db.CreateTx(ctx, nil, n)
+}
+
+// CreateTx implements the NotificationRuleStore interface by inserting the new NotificationRule into the database.
+// A new ID is always created.
+func (db *DB) CreateTx(ctx context.Context, tx *sql.Tx, n *NotificationRule) (*NotificationRule, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.Admin, permission.MatchUser(n.UserID)) + if err != nil { + return nil, err + } + + n, err = n.Normalize(false) + if err != nil { + return nil, err + } + + n.ID = uuid.NewV4().String() + + _, err = wrapTx(ctx, tx, db.insert).ExecContext(ctx, n.ID, n.UserID, n.DelayMinutes, n.ContactMethodID) + if err != nil { + return nil, err + } + + return n, nil +} + +// Delete implements the NotificationRuleStore interface. +func (db *DB) Delete(ctx context.Context, id string) error { + return db.DeleteTx(ctx, nil, id) +} + +func wrapTx(ctx context.Context, tx *sql.Tx, stmt *sql.Stmt) *sql.Stmt { + if tx == nil { + return stmt + } + + return tx.StmtContext(ctx, stmt) +} + +// DeleteTx will delete notification rules with the provided ids. +func (db *DB) DeleteTx(ctx context.Context, tx *sql.Tx, ids ...string) error { + err := permission.LimitCheckAny(ctx, permission.Admin, permission.User) + if err != nil { + return err + } + + if len(ids) == 0 { + return nil + } + + err = validate.ManyUUID("NotificationRuleID", ids, 50) + if err != nil { + return err + } + + if permission.Admin(ctx) { + _, err = wrapTx(ctx, tx, db.delete).ExecContext(ctx, pq.StringArray(ids)) + return err + } + + rows, err := wrapTx(ctx, tx, db.lookupUserID).QueryContext(ctx, pq.StringArray(ids)) + if err != nil { + return err + } + defer rows.Close() + + var checks []permission.Checker + var userID string + for rows.Next() { + err = rows.Scan(&userID) + if err != nil { + return err + } + checks = append(checks, permission.MatchUser(userID)) + } + + err = permission.LimitCheckAny(ctx, checks...) + if err != nil { + return err + } + _, err = wrapTx(ctx, tx, db.delete).ExecContext(ctx, pq.StringArray(ids)) + return err +} + +// FindOne implements the NotificationRuleStore interface. 
+func (db *DB) FindOne(ctx context.Context, id string) (*NotificationRule, error) {
+	err := validate.UUID("NotificationRuleID", id)
+	if err != nil {
+		return nil, err
+	}
+
+	err = permission.LimitCheckAny(ctx, permission.System, permission.Admin, permission.User)
+	if err != nil {
+		return nil, err
+	}
+	var n NotificationRule
+	row := db.findOne.QueryRowContext(ctx, id)
+	err = row.Scan(&n.ID, &n.UserID, &n.DelayMinutes, &n.ContactMethodID)
+	if err != nil {
+		return nil, err
+	}
+	return &n, nil
+}
+
+// UpdateDelay implements the NotificationRuleStore interface.
+func (db *DB) UpdateDelay(ctx context.Context, id string, delay int) error {
+	err := validate.UUID("NotificationRuleID", id)
+	if err != nil {
+		return err
+	}
+	err = validateDelay(delay)
+	if err != nil {
+		return err
+	}
+
+	err = permission.LimitCheckAny(ctx, permission.Admin, permission.User)
+	if err != nil {
+		return err
+	}
+
+	if permission.Admin(ctx) {
+		_, err = db.update.ExecContext(ctx, id, delay)
+		return err
+	}
+
+	var userID string
+
+	row := db.lookupUserID.QueryRowContext(ctx, pq.StringArray{id})
+	err = row.Scan(&userID)
+	if err != nil {
+		return err
+	}
+
+	err = permission.LimitCheckAny(ctx, permission.MatchUser(userID))
+	if err != nil {
+		return err
+	}
+
+	_, err = db.update.ExecContext(ctx, id, delay)
+	return err
+}
+
+// FindAll implements the NotificationRuleStore interface.
+func (db *DB) FindAll(ctx context.Context, userID string) ([]NotificationRule, error) { + err := validate.UUID("UserID", userID) + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.System, permission.User, permission.Admin) + if err != nil { + return nil, err + } + + rows, err := db.findAll.QueryContext(ctx, userID) + if err != nil { + return nil, err + } + defer rows.Close() + + notificationrules := []NotificationRule{} + for rows.Next() { + var n NotificationRule + err = rows.Scan(&n.ID, &n.UserID, &n.DelayMinutes, &n.ContactMethodID) + if err != nil { + return nil, err + } + notificationrules = append(notificationrules, n) + } + + return notificationrules, nil +} diff --git a/user/search.go b/user/search.go new file mode 100644 index 0000000000..593cd73a42 --- /dev/null +++ b/user/search.go @@ -0,0 +1,121 @@ +package user + +import ( + "context" + "database/sql" + "github.com/target/goalert/permission" + "github.com/target/goalert/search" + "github.com/target/goalert/validation/validate" + "text/template" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// SearchOptions allow filtering and paginating the list of users. +type SearchOptions struct { + Search string `json:"s,omitempty"` + After SearchCursor `json:"a,omitempty"` + + // Omit specifies a list of user IDs to exclude from the results. + Omit []string `json:"o,omitempty"` + + Limit int `json:"-"` +} + +// SearchCursor is used to indicate a position in a paginated list. 
+type SearchCursor struct { + Name string `json:"n,omitempty"` +} + +var searchTemplate = template.Must(template.New("search").Parse(` + SELECT + id, name, email, role + FROM users usr + WHERE true + {{if .Omit}} + AND not id = any(:omit) + {{end}} + {{if .SearchStr}} + AND usr.name ILIKE :search + {{end}} + {{if .After.Name}} + AND lower(usr.name) > lower(:afterName) + {{end}} + ORDER BY lower(usr.name) + LIMIT {{.Limit}} +`)) + +type renderData SearchOptions + +func (opts renderData) SearchStr() string { + if opts.Search == "" { + return "" + } + + return "%" + search.Escape(opts.Search) + "%" +} + +func (opts renderData) Normalize() (*renderData, error) { + if opts.Limit == 0 { + opts.Limit = search.DefaultMaxResults + } + + err := validate.Many( + validate.Text("Search", opts.Search, 0, search.MaxQueryLen), + validate.Range("Limit", opts.Limit, 0, search.MaxResults), + validate.ManyUUID("Omit", opts.Omit, 50), + ) + if opts.After.Name != "" { + err = validate.Many(err, validate.IDName("After.Name", opts.After.Name)) + } + + return &opts, err +} + +func (opts renderData) QueryArgs() []sql.NamedArg { + return []sql.NamedArg{ + sql.Named("search", opts.SearchStr()), + sql.Named("afterName", opts.After.Name), + sql.Named("omit", pq.StringArray(opts.Omit)), + } +} + +func (db *DB) Search(ctx context.Context, opts *SearchOptions) ([]User, error) { + err := permission.LimitCheckAny(ctx, permission.User) + if err != nil { + return nil, err + } + if opts == nil { + opts = &SearchOptions{} + } + data, err := (*renderData)(opts).Normalize() + if err != nil { + return nil, err + } + query, args, err := search.RenderQuery(ctx, searchTemplate, data) + if err != nil { + return nil, errors.Wrap(err, "render query") + } + + rows, err := db.db.QueryContext(ctx, query, args...) 
+ if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + var result []User + var u User + for rows.Next() { + err = rows.Scan(&u.ID, &u.Name, &u.Email, &u.Role) + if err != nil { + return nil, err + } + result = append(result, u) + } + + return result, nil +} diff --git a/user/store.go b/user/store.go new file mode 100644 index 0000000000..ba9654bab5 --- /dev/null +++ b/user/store.go @@ -0,0 +1,454 @@ +package user + +import ( + "context" + "database/sql" + "fmt" + "github.com/target/goalert/permission" + "github.com/target/goalert/util" + "github.com/target/goalert/validation/validate" + + "github.com/lib/pq" +) + +// Store allows the lookup and management of Users. +type Store interface { + Insert(context.Context, *User) (*User, error) + InsertTx(context.Context, *sql.Tx, *User) (*User, error) + Update(context.Context, *User) error + UpdateTx(context.Context, *sql.Tx, *User) error + Delete(context.Context, string) error + DeleteManyTx(context.Context, *sql.Tx, []string) error + FindOne(context.Context, string) (*User, error) + FindOneTx(ctx context.Context, tx *sql.Tx, id string, forUpdate bool) (*User, error) + FindAll(context.Context) ([]User, error) + FindMany(context.Context, []string) ([]User, error) + Search(context.Context, *SearchOptions) ([]User, error) + + AddAuthSubjectTx(ctx context.Context, tx *sql.Tx, a *AuthSubject) error + DeleteAuthSubjectTx(ctx context.Context, tx *sql.Tx, a *AuthSubject) error + FindAllAuthSubjectsForUser(ctx context.Context, userID string) ([]AuthSubject, error) + FindSomeAuthSubjectsForProvider(ctx context.Context, limit int, afterSubjectID, providerID string) ([]AuthSubject, error) +} + +var _ Store = &DB{} + +// DB implements the Store against a *sql.DB backend. 
+type DB struct { + db *sql.DB + + insert *sql.Stmt + update *sql.Stmt + delete *sql.Stmt + findOne *sql.Stmt + findAll *sql.Stmt + + findMany *sql.Stmt + deleteMany *sql.Stmt + + findOneForUpdate *sql.Stmt + + insertUserAuthSubject *sql.Stmt + deleteUserAuthSubject *sql.Stmt + + findAuthSubjectsByUser *sql.Stmt +} + +// NewDB will create a DB backend from a sql.DB. An error will be returned if statements fail to prepare. +func NewDB(ctx context.Context, db *sql.DB) (*DB, error) { + p := &util.Prepare{DB: db, Ctx: ctx} + return &DB{ + db: db, + + insert: p.P(` + INSERT INTO users ( + id, name, email, avatar_url, role, alert_status_log_contact_method_id + ) + VALUES ($1, $2, $3, $4, $5, $6) + `), + + update: p.P(` + UPDATE users + SET + name = $2, + email = $3, + alert_status_log_contact_method_id = $4 + WHERE id = $1 + `), + + delete: p.P(` + DELETE FROM users + WHERE id = $1 + `), + + findMany: p.P(` + SELECT + id, name, email, avatar_url, role, alert_status_log_contact_method_id + FROM users + WHERE id = any($1) + `), + deleteMany: p.P(`DELETE FROM users WHERE id = any($1)`), + + findOne: p.P(` + SELECT + id, name, email, avatar_url, role, alert_status_log_contact_method_id + FROM users + WHERE id = $1 + `), + findOneForUpdate: p.P(` + SELECT + id, name, email, avatar_url, role, alert_status_log_contact_method_id + FROM users + WHERE id = $1 + FOR UPDATE + `), + + findAuthSubjectsByUser: p.P(` + SELECT provider_id, subject_id + FROM auth_subjects + WHERE user_id = $1 + `), + + findAll: p.P(` + SELECT + id, name, email, avatar_url, role, alert_status_log_contact_method_id + FROM users + `), + + insertUserAuthSubject: p.P(` + INSERT into auth_subjects ( + user_id, provider_id, subject_id + ) + VALUES ($1, $2, $3) + ON CONFLICT DO NOTHING + `), + + deleteUserAuthSubject: p.P(` + DELETE FROM auth_subjects + WHERE + user_id = $1 AND + provider_id = $2 AND + subject_id = $3 + `), + }, p.Err +} + +func (db *DB) DeleteManyTx(ctx context.Context, tx *sql.Tx, ids []string) 
error {
+	err := permission.LimitCheckAny(ctx, permission.System)
+	if err != nil {
+		return err
+	}
+	if len(ids) == 0 {
+		return nil
+	}
+
+	err = validate.Range("Count", len(ids), 1, 100)
+	if err != nil {
+		return err
+	}
+
+	del := db.deleteMany
+	if tx != nil {
+		del = tx.StmtContext(ctx, del) // bind stmt to tx; previously the returned tx-scoped stmt was discarded, so the delete ran outside the transaction
+	}
+
+	_, err = del.ExecContext(ctx, pq.StringArray(ids))
+	return err
+}
+
+// InsertTx implements the Store interface by inserting the new User into the database.
+// The insert statement is first wrapped in tx.
+func (db *DB) InsertTx(ctx context.Context, tx *sql.Tx, u *User) (*User, error) {
+	n, err := u.Normalize()
+	if err != nil {
+		return nil, err
+	}
+	err = permission.LimitCheckAny(ctx, permission.System, permission.Admin)
+	if err != nil {
+		return nil, err
+	}
+	_, err = tx.Stmt(db.insert).ExecContext(ctx, n.fields()...)
+	if err != nil {
+		return nil, err
+	}
+
+	return n, nil
+}
+
+// Insert implements the Store interface by inserting the new User into the database.
+func (db *DB) Insert(ctx context.Context, u *User) (*User, error) {
+	err := permission.LimitCheckAny(ctx, permission.System, permission.Admin)
+	if err != nil {
+		return nil, err
+	}
+	tx, err := db.db.BeginTx(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+
+	u, err = db.InsertTx(ctx, tx, u)
+	if err != nil {
+		return nil, err
+	}
+	err = tx.Commit()
+	if err != nil {
+		return nil, err
+	}
+
+	return u, nil
+}
+
+// Delete implements the UserStore interface.
+func (db *DB) Delete(ctx context.Context, id string) error {
+	err := validate.UUID("UserID", id)
+	if err != nil {
+		return err
+	}
+	err = permission.LimitCheckAny(ctx, permission.System, permission.Admin)
+	if err != nil {
+		return err
+	}
+	_, err = db.delete.ExecContext(ctx, id)
+	return err
+}
+
+// Update implements the Store interface. Only admins can update user roles. 
+func (db *DB) Update(ctx context.Context, u *User) error { + return db.UpdateTx(ctx, nil, u) +} +func (db *DB) UpdateTx(ctx context.Context, tx *sql.Tx, u *User) error { + n, err := u.Normalize() + if err != nil { + return err + } + + err = permission.LimitCheckAny(ctx, permission.System, permission.Admin, permission.MatchUser(u.ID)) + if err != nil { + return err + } + update := db.update + if tx != nil { + update = tx.StmtContext(ctx, update) + } + _, err = update.ExecContext(ctx, n.userUpdateFields()...) + return err +} + +func (db *DB) FindMany(ctx context.Context, ids []string) ([]User, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + err = validate.ManyUUID("UserID", ids, 200) + if err != nil { + return nil, err + } + + rows, err := db.findMany.QueryContext(ctx, pq.StringArray(ids)) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + defer rows.Close() + + result := make([]User, 0, len(ids)) + var u User + for rows.Next() { + err = u.scanFrom(rows.Scan) + if err != nil { + return nil, err + } + result = append(result, u) + } + + return result, nil +} + +// FindOne implements the Store interface. +func (db *DB) FindOne(ctx context.Context, id string) (*User, error) { + return db.FindOneTx(ctx, nil, id, false) +} +func (db *DB) FindOneTx(ctx context.Context, tx *sql.Tx, id string, forUpdate bool) (*User, error) { + err := validate.UUID("UserID", id) + if err != nil { + return nil, err + } + + err = permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + + var u User + findOne := db.findOne + if forUpdate { + findOne = db.findOneForUpdate + } + if tx != nil { + findOne = tx.StmtContext(ctx, findOne) + } + row := findOne.QueryRowContext(ctx, id) + err = u.scanFrom(row.Scan) + if err != nil { + return nil, err + } + return &u, nil +} + +// FindSomeAuthSubjectsForProvider implements the Store interface. 
It finds all auth subjects associated with a given userID. +func (db *DB) FindSomeAuthSubjectsForProvider(ctx context.Context, limit int, afterSubjectID, providerID string) ([]AuthSubject, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.Admin) + if err != nil { + return nil, err + } + + // treat as a subject ID for now + err = validate.Many( + validate.SubjectID("ProviderID", providerID), + validate.Range("Limit", limit, 0, 9000), + ) + if afterSubjectID != "" { + err = validate.Many(err, validate.SubjectID("AfterID", afterSubjectID)) + } + if err != nil { + return nil, err + } + if limit == 0 { + limit = 50 + } + + q := fmt.Sprintf(` + SELECT user_id, subject_id + FROM auth_subjects + WHERE provider_id = $1 AND subject_id > $2 + ORDER BY subject_id + LIMIT %d + `, limit) + + rows, err := db.db.QueryContext(ctx, q, providerID, afterSubjectID) + if err != nil { + return nil, err + } + defer rows.Close() + var authSubjects []AuthSubject + for rows.Next() { + var a AuthSubject + a.ProviderID = providerID + + err = rows.Scan(&a.UserID, &a.SubjectID) + if err != nil { + return nil, err + } + authSubjects = append(authSubjects, a) + } + + return authSubjects, nil +} + +// FindAllAuthSubjectsForUser implements the Store interface. It finds all auth subjects associated with a given userID. 
+func (db *DB) FindAllAuthSubjectsForUser(ctx context.Context, userID string) ([]AuthSubject, error) { + err := permission.LimitCheckAny(ctx, permission.System, permission.Admin) + if err != nil { + return nil, err + } + + err = validate.UUID("UserID", userID) + if err != nil { + return nil, err + } + + var authSubjects []AuthSubject + rows, err := db.findAuthSubjectsByUser.QueryContext(ctx, userID) + if err != nil { + return nil, err + } + defer rows.Close() + for rows.Next() { + var a AuthSubject + a.UserID = userID + err = rows.Scan(&a.ProviderID, &a.SubjectID) + if err != nil { + return nil, err + } + authSubjects = append(authSubjects, a) + } + + return authSubjects, nil +} + +// FindAll implements the Store interface. +func (db *DB) FindAll(ctx context.Context) ([]User, error) { + err := permission.LimitCheckAny(ctx, permission.All) + if err != nil { + return nil, err + } + rows, err := db.findAll.QueryContext(ctx) + if err != nil { + return nil, err + } + defer rows.Close() + + users := []User{} + for rows.Next() { + var u User + err = u.scanFrom(rows.Scan) + if err != nil { + return nil, err + } + users = append(users, u) + } + + return users, nil +} + +// AddAuthSubjectTx implements the Store interface. It is used to add an auth subject to a given user. +func (db *DB) AddAuthSubjectTx(ctx context.Context, tx *sql.Tx, a *AuthSubject) error { + var userID string + if a != nil { + userID = a.UserID + } + err := permission.LimitCheckAny(ctx, permission.System, permission.Admin, permission.MatchUser(userID)) + if err != nil { + return err + } + + n, err := a.Normalize() + if err != nil { + return err + } + + s := db.insertUserAuthSubject + if tx != nil { + s = tx.Stmt(s) + } + _, err = s.ExecContext(ctx, a.UserID, n.ProviderID, n.SubjectID) + return err +} + +// DeleteAuthSubjectTx implements the Store interface. It is used to remove an auth subject for a given user. 
+func (db *DB) DeleteAuthSubjectTx(ctx context.Context, tx *sql.Tx, a *AuthSubject) error { + err := permission.LimitCheckAny(ctx, permission.System, permission.Admin) + if err != nil { + return err + } + + n, err := a.Normalize() + if err != nil { + return err + } + + s := db.deleteUserAuthSubject + if tx != nil { + s = tx.Stmt(s) + } + _, err = s.ExecContext(ctx, a.UserID, n.ProviderID, n.SubjectID) + if err != nil && err != sql.ErrNoRows { + // do not return error if auth subject doesn't exist + return err + } + return nil +} diff --git a/user/user.go b/user/user.go new file mode 100644 index 0000000000..71064dda7c --- /dev/null +++ b/user/user.go @@ -0,0 +1,131 @@ +package user + +import ( + "crypto/md5" + "database/sql" + "encoding/hex" + "fmt" + "github.com/target/goalert/permission" + "github.com/target/goalert/validation/validate" + + uuid "github.com/satori/go.uuid" +) + +// A User is the base information of a user of the system. Authentication details are stored +// separately based on the auth provider. +// +type User struct { + // ID is the unique identifier for the user + ID string `json:"id"` + + // Name is the full name of the user + Name string `json:"name"` + + // Email is the primary contact email for the user. It is used for account-related communications + Email string `json:"email"` + + // AvatarURL is an absolute address for an image to be used as the avatar. + AvatarURL string `json:"avatar_url"` + + // AlertStatusCMID defines a contact method ID for alert status updates. + AlertStatusCMID string `json:"alert_status_log_contact_method_id"` + + // The Role of the user + Role permission.Role `json:"role" store:"readonly"` +} + +// ResolveAvatarURL will resolve the user avatar URL, using the email if none is set. 
+func (u User) ResolveAvatarURL(fullSize bool) string { + if u.AvatarURL == "" { + suffix := "" + if fullSize { + suffix = "&s=2048" + } + sum := md5.Sum([]byte(u.Email)) + u.AvatarURL = fmt.Sprintf("https://gravatar.com/avatar/%s?d=retro%s", hex.EncodeToString(sum[:]), suffix) + } + return u.AvatarURL +} + +type scanFn func(...interface{}) error + +func (u *User) scanFrom(fn scanFn) error { + var statusCM sql.NullString + err := fn( + &u.ID, + &u.Name, + &u.Email, + &u.AvatarURL, + &u.Role, + &statusCM, + ) + u.AlertStatusCMID = statusCM.String + return err +} + +func (u *User) userUpdateFields() []interface{} { + var statusCM sql.NullString + if u.AlertStatusCMID != "" { + statusCM.Valid = true + statusCM.String = u.AlertStatusCMID + } + return []interface{}{ + u.ID, + u.Name, + u.Email, + statusCM, + } +} +func (u *User) fields() []interface{} { + var statusCM sql.NullString + if u.AlertStatusCMID != "" { + statusCM.Valid = true + statusCM.String = u.AlertStatusCMID + } + return []interface{}{ + u.ID, + u.Name, + u.Email, + u.AvatarURL, + u.Role, + statusCM, + } +} + +// Normalize will produce a normalized/validated User struct. +// Will only do the validate if email is not empty +func (u User) Normalize() (*User, error) { + var err error + if u.ID == "" { + u.ID = uuid.NewV4().String() + } + if u.Email != "" { + err = validate.Email("Email", u.Email) + // Sanitize Email after it has been validated. 
+ u.Email = validate.SanitizeEmail(u.Email) + } + + if u.AvatarURL != "" { + err = validate.Many( + err, + validate.AbsoluteURL("AvatarURL", u.AvatarURL), + ) + } + + if u.AlertStatusCMID != "" { + err = validate.Many( + err, + validate.UUID("AlertStatusCMID", u.AlertStatusCMID), + ) + } + + err = validate.Many( + err, + validate.Name("Name", u.Name), + validate.OneOf("Role", u.Role, permission.RoleAdmin, permission.RoleUser), + ) + if err != nil { + return nil, err + } + return &u, nil +} diff --git a/user/user_test.go b/user/user_test.go new file mode 100644 index 0000000000..206c7e6531 --- /dev/null +++ b/user/user_test.go @@ -0,0 +1,37 @@ +package user + +import ( + "github.com/target/goalert/permission" + "testing" +) + +func TestUser_Normalize(t *testing.T) { + test := func(valid bool, u User) { + name := "valid" + if !valid { + name = "invalid" + } + t.Run(name, func(t *testing.T) { + t.Logf("%+v", u) + _, err := u.Normalize() + if valid && err != nil { + t.Errorf("got %v; want nil", err) + } else if !valid && err == nil { + t.Errorf("got nil; want") + } + }) + } + + valid := []User{ + {Name: "Joe", Role: permission.RoleAdmin, Email: "foo@bar.com"}, + } + invalid := []User{ + {}, + } + for _, u := range valid { + test(true, u) + } + for _, u := range invalid { + test(false, u) + } +} diff --git a/util/alignedticker.go b/util/alignedticker.go new file mode 100644 index 0000000000..800b79b12a --- /dev/null +++ b/util/alignedticker.go @@ -0,0 +1,75 @@ +package util + +import ( + "math/rand" + "sync" + "time" +) + +// AlignedTicker works like a time.Ticker except it will align the first tick. +// This makes it useful in situations where something should run on-the-minute +// for example. +type AlignedTicker struct { + tm *time.Timer + tc *time.Ticker + mx sync.Mutex + done bool + dur time.Duration + c chan time.Time + C <-chan time.Time +} + +// NewAlignedTicker will create and start a new AlignedTicker. 
The first tick +// will be adjusted to round, with variance added. +// +// For example (time.Minute, time.Second) will align ticks to on-the-minute +// plus 0-1 second. +func NewAlignedTicker(round, variance time.Duration) *AlignedTicker { + vary := time.Duration(rand.Int63n(int64(variance))) + s := time.Now().Round(round).Add(vary) + for s.Before(time.Now()) { + s = s.Add(round) + } + a := &AlignedTicker{ + c: make(chan time.Time), + dur: round, + } + a.C = a.c + a.tm = time.AfterFunc(time.Until(s), a.firstTick) + return a +} +func (a *AlignedTicker) firstTick() { + a.mx.Lock() + defer a.mx.Unlock() + if a.done { + return + } + + a.tc = time.NewTicker(a.dur) + go func() { + for t := range a.tc.C { + a.mx.Lock() + if a.done { + a.mx.Unlock() + break + } + a.c <- t + a.mx.Unlock() + } + }() +} + +// Stop will stop the running ticker and close the channel. +func (a *AlignedTicker) Stop() { + a.mx.Lock() + defer a.mx.Unlock() + if a.done { + return + } + a.done = true + a.tm.Stop() + if a.tc != nil { + a.tc.Stop() + } + close(a.c) +} diff --git a/util/contextcache.go b/util/contextcache.go new file mode 100644 index 0000000000..5ef907fac4 --- /dev/null +++ b/util/contextcache.go @@ -0,0 +1,130 @@ +package util + +import ( + "context" + "net/http" + + uuid "github.com/satori/go.uuid" +) + +type cacheableKey string + +const cacheableKeyID = cacheableKey("cache-id") + +func cacheableContext(ctx context.Context) context.Context { + return context.WithValue(ctx, cacheableKeyID, uuid.NewV4().String()) +} + +// WrapCacheableContext will make all request contexts cacheable, to be used with +// a ContextCache. +func WrapCacheableContext(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + next.ServeHTTP(w, req.WithContext(cacheableContext(req.Context()))) + }) +} + +// A ContextCache is used to cache and load values on a per-context basis. 
+// No values will be stored unless the context.Context has passed through +// WrapCacheableContext. +type ContextCache interface { + Load(ctx context.Context, id string) interface{} + Store(ctx context.Context, id string, value interface{}) + LoadOrStore(context.Context, string, func() (interface{}, error)) (interface{}, error) +} + +type cacheRegister struct { + cID string + + id string + value interface{} + + done <-chan struct{} +} +type cacheRequest struct { + cID string + id string + ch chan interface{} +} + +type chanCache struct { + setCh chan *cacheRegister + getCh chan *cacheRequest + cleanCh chan string + + cache map[string]map[string]interface{} +} + +// NewContextCache creates a new ContextCache +func NewContextCache() ContextCache { + return newChanCache() +} + +func newChanCache() *chanCache { + c := &chanCache{ + setCh: make(chan *cacheRegister), + getCh: make(chan *cacheRequest), + cleanCh: make(chan string), + + cache: make(map[string]map[string]interface{}, 4000), + } + go c.loop() + return c +} +func (c *chanCache) cleanup(cid string, ch <-chan struct{}) { + <-ch + c.cleanCh <- cid +} +func (c *chanCache) loop() { + for { + select { + case cid := <-c.cleanCh: + delete(c.cache, cid) + case reg := <-c.setCh: + m := c.cache[reg.cID] + if m == nil { + m = make(map[string]interface{}) + c.cache[reg.cID] = m + go c.cleanup(reg.cID, reg.done) + } + m[reg.id] = reg.value + case req := <-c.getCh: + m := c.cache[req.cID] + if m == nil { + req.ch <- nil + } + req.ch <- m[req.id] + } + } +} + +func (c *chanCache) Load(ctx context.Context, id string) interface{} { + cID, ok := ctx.Value(cacheableKeyID).(string) + if !ok { + return nil + } + + ch := make(chan interface{}, 1) + c.getCh <- &cacheRequest{cID: cID, ch: ch, id: id} + + return <-ch +} +func (c *chanCache) LoadOrStore(ctx context.Context, id string, fn func() (interface{}, error)) (interface{}, error) { + v := c.Load(ctx, id) + if v != nil { + return v, nil + } + + v, err := fn() + if err == nil && 
v != nil { + c.Store(ctx, id, v) + } + return v, err +} +func (c *chanCache) Store(ctx context.Context, id string, val interface{}) { + cID, ok := ctx.Value(cacheableKeyID).(string) + if !ok { + return + } + + c.setCh <- &cacheRegister{cID: cID, id: id, value: val, done: ctx.Done()} +} diff --git a/util/contextcache_test.go b/util/contextcache_test.go new file mode 100644 index 0000000000..56275aa167 --- /dev/null +++ b/util/contextcache_test.go @@ -0,0 +1,89 @@ +package util + +import ( + "context" + "errors" + "testing" +) + +func TestCache(t *testing.T) { + c := NewContextCache() + + c1, can1 := context.WithCancel(cacheableContext(context.Background())) + c2, can2 := context.WithCancel(cacheableContext(context.Background())) + c3 := context.Background() + defer can1() + defer can2() + + testLoad := func(ctx context.Context, key, val string) { + t.Run("Load", func(t *testing.T) { + v := c.Load(ctx, key) + if val == "" && v != nil { + t.Fatalf("load unknown key: got %+v; want nil", v) + } + if val == "" { + return + } + + if v.(string) != val { + t.Errorf("load stored string: got '%s'; want '%s'", v.(string), val) + } + }) + } + + // non-wrapped contexts don't cache anything + testLoad(c3, "foo", "") + c.Store(c3, "foo", "bar") + testLoad(c3, "foo", "") + + testLoad(c1, "foo", "") + c.Store(c1, "foo", "bar") + testLoad(c1, "foo", "bar") + c.Store(c1, "foo", "baz") + testLoad(c1, "foo", "baz") + testLoad(c2, "foo", "") // cache should be per-cacheableContext + + type k string + + // cache should follow all child contexts + c1Sub := context.WithValue(c1, k("test"), nil) + testLoad(c1Sub, "foo", "baz") + c.Store(c1Sub, "foo", "blah") + testLoad(c1Sub, "foo", "blah") + testLoad(c1, "foo", "blah") + + testLS := func(key, retrVal string, retrErr error, expVal string, expErr error) { + t.Run("LoadOrStore", func(t *testing.T) { + v, err := c.LoadOrStore(c1, key, func() (interface{}, error) { + return retrVal, retrErr + }) + if err != expErr { + t.Errorf("err was %+v; want 
%+v", err, expErr) + } + m, _ := v.(string) + if m != expVal { + t.Errorf("val was '%s'; want '%s'", m, expVal) + } + }) + } + e := errors.New("broke") + data := []struct { + key string + retV string + retE error + expV string + expE error + }{ + {"foo", "a", nil, "bar", nil}, + {"foo", "a", e, "bar", nil}, + {"bin", "baz", e, "baz", e}, + {"bin", "foo", e, "foo", e}, //don't store on err + {"bin", "foo", nil, "foo", nil}, + {"bin", "fwah", e, "foo", nil}, + } + c.Store(c1, "foo", "bar") + for _, d := range data { + testLS(d.key, d.retV, d.retE, d.expV, d.expE) + } + +} diff --git a/util/contextroundtripper.go b/util/contextroundtripper.go new file mode 100644 index 0000000000..d9b3dddca1 --- /dev/null +++ b/util/contextroundtripper.go @@ -0,0 +1,28 @@ +package util + +import ( + "context" + "net/http" +) + +type ctxTransport struct { + rt http.RoundTripper + ctx context.Context +} + +func (t *ctxTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.rt.RoundTrip(req.WithContext(t.ctx)) +} + +// ContextRoundTripper will return an http.RoundTripper that will replace all request contexts +// with the provided one. This means that values and deadlines for all requests will be bound +// to the original context. 
+func ContextRoundTripper(ctx context.Context, rt http.RoundTripper) http.RoundTripper { + if rt == nil { + rt = http.DefaultTransport + } + return &ctxTransport{ + rt: rt, + ctx: ctx, + } +} diff --git a/util/contextwaitgroup.go b/util/contextwaitgroup.go new file mode 100644 index 0000000000..23cacac04f --- /dev/null +++ b/util/contextwaitgroup.go @@ -0,0 +1,85 @@ +package util + +import "context" + +type ContextWaitGroup struct { + count int + + ctx context.Context + nCh chan int + wCh chan chan struct{} + + notify []chan struct{} +} + +func NewContextWaitGroup(ctx context.Context) *ContextWaitGroup { + wg := &ContextWaitGroup{ + ctx: ctx, + nCh: make(chan int), + wCh: make(chan chan struct{}), + } + go wg.loop() + return wg +} +func (c *ContextWaitGroup) loop() { + done := func() { + for _, ch := range c.notify { + close(ch) + } + c.notify = nil + } + defer done() +mainLoop: + for { + select { + case ch := <-c.wCh: + if c.count == 0 { + close(ch) + continue + } + c.notify = append(c.notify, ch) + case <-c.ctx.Done(): + break mainLoop + case n := <-c.nCh: + c.count += n + if c.count == 0 { + done() + continue + } + if c.count < 0 { + panic("Done() called too many times") + } + } + } + +cleanup: + for { + select { + case <-c.nCh: + case ch := <-c.wCh: + close(ch) + default: + break cleanup + } + } +} +func (c *ContextWaitGroup) Add(n int) { + if c.ctx.Err() != nil { + return + } + c.nCh <- n +} +func (c *ContextWaitGroup) WaitCh() <-chan struct{} { + ch := make(chan struct{}) + c.wCh <- ch + return ch +} +func (c *ContextWaitGroup) Done() { + if c.ctx.Err() != nil { + return + } + c.nCh <- -1 +} +func (c *ContextWaitGroup) Wait() { + <-c.WaitCh() +} diff --git a/util/errutil/httperror.go b/util/errutil/httperror.go new file mode 100644 index 0000000000..b984ad85a6 --- /dev/null +++ b/util/errutil/httperror.go @@ -0,0 +1,76 @@ +package errutil + +import ( + "context" + "database/sql" + "github.com/target/goalert/limit" + 
"github.com/target/goalert/permission" + "github.com/target/goalert/util/log" + "github.com/target/goalert/validation" + "net/http" + + "github.com/lib/pq" + + "github.com/pkg/errors" +) + +func isCtxCause(err error) bool { + if err == context.Canceled { + return true + } + if err == context.DeadlineExceeded { + return true + } + if err == sql.ErrTxDone { + return true + } + + // 57014 = query_canceled + // https://www.postgresql.org/docs/9.6/static/errcodes-appendix.html + if e, ok := err.(*pq.Error); ok && e.Code == "57014" { + return true + } + + return false +} + +// HTTPError will respond in a standard way when err != nil. If +// err is nil, false is returned, true otherwise. +func HTTPError(ctx context.Context, w http.ResponseWriter, err error) bool { + if err == nil { + return false + } + + err = MapDBError(err) + if permission.IsUnauthorized(err) { + log.Debug(ctx, err) + http.Error(w, errors.Cause(err).Error(), http.StatusUnauthorized) + return true + } + if permission.IsPermissionError(err) { + log.Debug(ctx, err) + http.Error(w, errors.Cause(err).Error(), http.StatusForbidden) + return true + } + if validation.IsValidationError(err) { + log.Debug(ctx, err) + http.Error(w, errors.Cause(err).Error(), http.StatusBadRequest) + return true + } + if limit.IsLimitError(err) { + log.Debug(ctx, err) + http.Error(w, errors.Cause(err).Error(), http.StatusConflict) + return true + } + + if ctx.Err() != nil && isCtxCause(errors.Cause(err)) { + // context timed out or was canceled + log.Debug(ctx, err) + http.Error(w, http.StatusText(http.StatusGatewayTimeout), http.StatusGatewayTimeout) + return true + } + + log.Log(ctx, err) + http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) + return true +} diff --git a/util/errutil/maperror.go b/util/errutil/maperror.go new file mode 100644 index 0000000000..4d6395b374 --- /dev/null +++ b/util/errutil/maperror.go @@ -0,0 +1,70 @@ +package errutil + +import ( + 
"github.com/target/goalert/limit" + "github.com/target/goalert/validation" + "strings" + + "github.com/lib/pq" + "github.com/pkg/errors" +) + +// MapDBError will map known DB errors (like unique names) to a valiation error +func MapDBError(err error) error { + if err == nil { + return nil + } + dbErr, ok := errors.Cause(err).(*pq.Error) + if !ok { + return err + } + + switch dbErr.Code { + case "23503": // fkey constraint + switch dbErr.Constraint { + case "user_overrides_add_user_id_fkey": + return validation.NewFieldError("AddUserID", "user does not exist") + case "user_overrides_remove_user_id_fkey": + return validation.NewFieldError("RemoveUserID", "user does not exist") + case "user_overrides_tgt_schedule_id_fkey": + return validation.NewFieldError("TargetID", "schedule does not exist") + } + case "23505": // unique constraint + if strings.HasPrefix(dbErr.Constraint, dbErr.Table+"_name") || dbErr.Constraint == "auth_basic_users_username_key" { + return validation.NewFieldError("Name", "already in use") + } + if dbErr.Constraint == "user_contact_methods_type_value_key" { + return validation.NewFieldError("Value", "contact method already exists for that type and value") + } + if dbErr.Constraint == "user_notification_rules_contact_method_id_delay_minutes_key" { + return validation.NewFieldError("DelayMinutes", "notification rule already exists for that delay and contact method") + } + case "23514": // check constraint + newErr := limit.MapError(dbErr) + if newErr != nil { + return newErr + } + switch dbErr.Constraint { + case "user_overrides_check2": + return validation.NewFieldError("AddUserID", "cannot be the same as the user being replaced") + case "user_override_no_conflict_allowed": + return validation.NewFieldError("UserID", "cannot override the same user twice at the same time, check existing overrides; "+dbErr.Hint) + case "alert_status_user_id_match": + return validation.NewFieldError("AlertStatusCMID", "contact method is for wrong user") + 
case "notification_rule_user_id_match": + return validation.NewFieldError("UserID", "contact method is for wrong user") + } + } + + switch dbErr.Constraint { + case "services_escalation_policy_id_fkey": + if strings.Contains(dbErr.Detail, "is still referenced") { + return validation.NewFieldError("EscalationPolicyID", "is currently in use") + } + if strings.Contains(dbErr.Detail, "is not present") { + return validation.NewFieldError("EscalationPolicyID", "does not exist") + } + } + + return err +} diff --git a/util/errutil/scruberror.go b/util/errutil/scruberror.go new file mode 100644 index 0000000000..06087b5fe4 --- /dev/null +++ b/util/errutil/scruberror.go @@ -0,0 +1,30 @@ +package errutil + +import ( + "github.com/pkg/errors" +) + +// SafeError is an error string, safe to return to the client. +type SafeError string + +// ClientError always returns true. +func (SafeError) ClientError() bool { return true } + +func (err SafeError) Error() string { return string(err) } + +// ScrubError will replace an err with a generic one if it is not a validation error. +// The boolean value indicates if the error was scrubbed (replaced with a safe one). +func ScrubError(err error) (bool, error) { + if err == nil { + return false, nil + } + type safe interface { + ClientError() bool + } + + if c, ok := errors.Cause(err).(safe); ok && c.ClientError() { + return false, err + } + + return true, errors.New("unexpected error") +} diff --git a/util/loadlocation.go b/util/loadlocation.go new file mode 100644 index 0000000000..c3868d2856 --- /dev/null +++ b/util/loadlocation.go @@ -0,0 +1,30 @@ +package util + +import ( + "sync" + "time" +) + +var tzCache = make(map[string]*time.Location, 100) +var tzMx sync.Mutex + +// LoadLocation works like time.LoadLocation but caches the result +// for the life of the process. 
+func LoadLocation(name string) (*time.Location, error) { + tzMx.Lock() + defer tzMx.Unlock() + + loc, ok := tzCache[name] + if ok { + return loc, nil + } + + loc, err := time.LoadLocation(name) + if err != nil { + return nil, err + } + + tzCache[name] = loc + + return loc, nil +} diff --git a/util/log/fields.go b/util/log/fields.go new file mode 100644 index 0000000000..c6c5655942 --- /dev/null +++ b/util/log/fields.go @@ -0,0 +1,90 @@ +package log + +import ( + "context" + + "go.opencensus.io/trace" +) + +// Fields are used to add values in structured logging. +type Fields map[string]interface{} +type logContextField string + +// SetRequestID will assign a unique ID to the context for tracing. +func SetRequestID(ctx context.Context) context.Context { + if ctx == nil { + ctx = defaultContext + } + return context.WithValue(ctx, logContextKeyRequestID, trace.FromContext(ctx).SpanContext().TraceID.String()) +} + +// ContextFields will return the current set of fields associated with a context. +func ContextFields(ctx context.Context) Fields { + if ctx == nil { + ctx = defaultContext + } + f, _ := ctx.Value(logContextKeyFieldList).([]string) + m := make(Fields, len(f)) + for _, f := range f { + m[f] = ctx.Value(logContextField(f)) + } + return m +} + +// WithField will return a context with the specified field set to value. +func WithField(ctx context.Context, field string, value interface{}) context.Context { + if ctx == nil { + ctx = defaultContext + } + f, _ := ctx.Value(logContextKeyFieldList).([]string) + + var hasField bool + // Search for the field in the existing slice. + for _, fn := range f { + if field == fn { + hasField = true + break + } + } + + if !hasField { + // If the field is missing (i.e. it's new) we need to add it to the + // list of fields. + // + // So we create a copy of the slice -- as we don't want to + // modify the existing one, since it's used by the parent + // context. 
+ fList := make([]string, len(f), len(f)+1) + copy(fList, f) + fList = append(fList, field) + f = fList + } + + ctx = context.WithValue(ctx, logContextKeyFieldList, f) + ctx = context.WithValue(ctx, logContextField(field), value) + + return ctx +} + +// WithFields will return a context with the provided fields set. +func WithFields(ctx context.Context, fields Fields) context.Context { + if ctx == nil { + ctx = defaultContext + } + if fields == nil { + return ctx + } + for field, value := range fields { + ctx = WithField(ctx, field, value) + } + return ctx +} + +// RequestID will return the associated RequestID or empty string if missing. +func RequestID(ctx context.Context) string { + if ctx == nil { + return "" + } + v, _ := ctx.Value(logContextKeyRequestID).(string) + return v +} diff --git a/util/log/fields_test.go b/util/log/fields_test.go new file mode 100644 index 0000000000..6304641124 --- /dev/null +++ b/util/log/fields_test.go @@ -0,0 +1,62 @@ +package log + +import ( + "context" + "testing" +) + +func TestWithField(t *testing.T) { + ctx := context.Background() + + foo := WithField(ctx, "foo", "bar") + bin := WithField(ctx, "bin", "baz") + + m := ContextFields(ctx) + if len(m) != 0 { + t.Errorf("no. fields for background ctx is %d; want 0", len(m)) + } + + m = ContextFields(foo) + if len(m) != 1 { + t.Errorf("no. fields for background ctx is %d; want 1", len(m)) + } + val, _ := m["foo"].(string) + if val != "bar" { + t.Errorf("foo = %s; want bar", val) + } + + m = ContextFields(bin) + if len(m) != 1 { + t.Errorf("no. fields for background ctx is %d; want 1", len(m)) + } + val, _ = m["bin"].(string) + if val != "baz" { + t.Errorf("bin = %s; want baz", val) + } + + foo2 := WithField(foo, "foo2", "bar2") + + m = ContextFields(foo) + if len(m) != 1 { + t.Errorf("no. fields for background ctx is %d; want 1", len(m)) + } + m = ContextFields(foo2) + if len(m) != 2 { + t.Errorf("no. 
// EnableStacks enables stack information via the Source field.
func EnableStacks() {
	stacks = true
}

// EnableJSON sets the output log format to JSON
func EnableJSON() {
	logrus.SetFormatter(&logrus.JSONFormatter{})
}

// init installs the human-friendly terminal formatter when stderr is
// attached to a TTY (interactive runs); otherwise logrus' default
// formatter (or JSON, if EnableJSON is called) remains in effect.
func init() {
	if terminal.IsTerminal(int(os.Stderr.Fd())) {
		logrus.SetFormatter(&terminalFormatter{})
	}
}
// findRootSource walks err's Cause() chain and returns the deepest error
// in the chain that carries a stack trace — i.e. the one recorded closest
// to where the failure originated. If no error in the chain implements
// stackTracer, the fully-unwrapped error is wrapped with a stack captured
// here via errors.WithStack, so the result always implements stackTracer
// (which callers such as addSource rely on).
func findRootSource(err error) error {
	var rootErr error
	for {
		// Unwrap one level at a time until the chain ends.
		if c, ok := err.(causer); ok {
			err = c.Cause()
		} else {
			break
		}
		// Keep overwriting so we end up with the *deepest* stack carrier.
		if _, ok := err.(stackTracer); ok {
			rootErr = err
		}
	}
	if rootErr == nil {
		// Nothing in the chain had a stack; attach one here as a fallback.
		rootErr = errors.WithStack(err)
	}
	return rootErr
}
// lineCol converts a 1-based byte offset within q (as reported in a
// pq.Error Position) into a (line, column) pair. Tabs are expanded to 8
// spaces when computing the column so it lines up with rendered output.
//
// The column is clamped to 0: a pos of 0, or one landing at the start of
// a line, would otherwise yield -1, and makeCodeFrame passes the column
// to strings.Repeat, which panics on a negative count.
func lineCol(q string, pos int) (int, int) {
	if pos > len(q) {
		pos = len(q)
	}
	lines := strings.Split(q[:pos], "\n")
	lastLine := strings.Replace(lines[len(lines)-1], "\t", strings.Repeat(" ", 8), -1)
	col := len(lastLine) - 1
	if col < 0 {
		col = 0
	}
	return len(lines), col
}
// Format renders a logrus entry for terminal output. It folds a recorded
// "error" field into the message text — adding a highlighted SQL code
// frame when the error is a queryError — and appends any "Source" (stack
// trace) field to the message, then delegates to the fallback
// TextFormatter for final rendering.
//
// NOTE(review): e.Message and e.Data are mutated in place; this assumes
// logrus does not reuse the entry with another formatter afterwards.
func (t *terminalFormatter) Format(e *logrus.Entry) ([]byte, error) {
	err, ok := e.Data["error"].(error)
	if ok {
		if qe, ok := errors.Cause(err).(queryError); ok {
			// SQL error: show the pq message plus a code frame
			// pointing at the failing position within the query.
			e.Message += qe.Cause().Message
			frame := makeCodeFrame(qe.Query(), qe.Cause())
			if frame != "" {
				e.Message += "\n\n" + frame
			}

		} else {
			e.Message += err.Error()
		}
		// The error is now part of the message; drop the field.
		delete(e.Data, "error")
	}
	// Stack trace (populated when EnableStacks is on) goes after the message.
	src, ok := e.Data["Source"].(string)
	if ok {
		delete(e.Data, "Source")
		e.Message += "\n" + src
	}

	return t.fallback.Format(e)
}
// P prepares the given SQL statement using the configured DB (and Ctx,
// when set) and returns it. Only the first error is recorded: once p.Err
// is non-nil, every subsequent call is a no-op returning nil, which lets
// callers prepare a batch of statements and check p.Err once at the end.
//
// A *pq.Error is wrapped in queryErr so log formatters can render the
// offending query text alongside the error.
func (p *Prepare) P(query string) (s *sql.Stmt) {
	if p.Err != nil {
		// A previous statement already failed; skip all further work.
		return nil
	}

	if p.Ctx != nil {
		s, p.Err = p.DB.PrepareContext(p.Ctx, query)
	} else {
		s, p.Err = p.DB.PrepareContext(context.Background(), query)
	}
	if p.Err != nil {
		// Attach the query text to pq errors for better diagnostics.
		if pqe, ok := p.Err.(*pq.Error); ok {
			p.Err = &queryErr{
				err: pqe,
				q:   query,
			}
		}

		p.Err = errors.WithStack(p.Err)
	}
	return s

}
// JoinURL will join a base URL and suffix, taking care to preserve and merge query parameters.
//
// Paths are joined with exactly one separating slash. Query parameters
// from both URLs are merged; when the same key appears in both, the
// suffix's values win. Returns an error if either input fails to parse.
func JoinURL(base, suffix string) (string, error) {
	bu, err := url.Parse(base)
	if err != nil {
		return "", err
	}

	su, err := url.Parse(suffix)
	if err != nil {
		return "", err
	}

	bu.Path = strings.TrimSuffix(bu.Path, "/") + "/" + strings.TrimPrefix(su.Path, "/")

	v := bu.Query()
	for name, vals := range su.Query() {
		// Assign the full value slice so repeated suffix keys keep all
		// of their values (Set/Get would keep only the first one).
		v[name] = vals
	}
	bu.RawQuery = v.Encode()

	return bu.String(), nil
}
// AddPrefix will prepend a prefix to all field names within the given error.
//
// NOTE: the underlying *fieldError values are mutated in place; err itself
// (including any wrapping around the cause) is returned unchanged. Errors
// of any other type pass through untouched.
func AddPrefix(fieldPrefix string, err error) error {
	switch e := errors.Cause(err).(type) {
	case *fieldError:
		e.fieldName = fieldPrefix + e.fieldName
	case *fieldErrors:
		for _, err := range e.errors {
			// Only concrete *fieldError entries can be rewritten;
			// other FieldError implementations are left as-is.
			if e, ok := err.(*fieldError); ok {
				e.fieldName = fieldPrefix + e.fieldName
			}
		}
	}
	return err
}
// SanitizeEmail will try to parse the email field and then return lower-case address portion or an empty string if parse failed.
func SanitizeEmail(email string) string {
	addr, err := mail.ParseAddress(email)
	if err != nil {
		// Unparseable input sanitizes to the empty string.
		return ""
	}
	return strings.ToLower(addr.Address)
}
+func Email(fname, email string) error { + if _, err := mail.ParseAddress(email); err != nil { + return validation.NewFieldError(fname, "must be a valid email: "+err.Error()) + } + return nil +} diff --git a/validation/validate/idname.go b/validation/validate/idname.go new file mode 100644 index 0000000000..afee4141a6 --- /dev/null +++ b/validation/validate/idname.go @@ -0,0 +1,37 @@ +package validate + +import ( + "github.com/target/goalert/validation" + "regexp" +) + +var idRx = regexp.MustCompile(`^[a-zA-Z0-9 \-_']+$`) + +// IDName will validate a ASCII name/identifier to ensure it is between 2 and 64 characters, +// starts with a letter, contains only letters, numbers, and spaces `-`, `_` or `'`. +// +// If invalid, a FieldError with the given field name is returned. +func IDName(fname, name string) error { + b := []byte(name) + l := len(b) + if l < 2 { + return validation.NewFieldError(fname, "must be at least 2 characters") + } + if l > 64 { + return validation.NewFieldError(fname, "cannot be more than 64 characters") + } + + if (b[0] < 'a' || b[0] > 'z') && (b[0] < 'A' || b[0] > 'Z') { + return validation.NewFieldError(fname, "must begin with a letter") + } + + if !idRx.Match(b) { + return validation.NewFieldError(fname, "can only contain letters, digits, hyphens, underscores, apostrophe and space") + } + + if b[l-1] == ' ' { + return validation.NewFieldError(fname, "must not end with space") + } + + return nil +} diff --git a/validation/validate/idname_test.go b/validation/validate/idname_test.go new file mode 100644 index 0000000000..ae45fdfddd --- /dev/null +++ b/validation/validate/idname_test.go @@ -0,0 +1,38 @@ +package validate + +import ( + "testing" +) + +func TestIDName(t *testing.T) { + test := func(valid bool, n string) { + var title string + if valid { + title = "Valid" + } else { + title = "Invalid" + } + t.Run(title, func(t *testing.T) { + err := IDName("Name", n) + if err == nil && !valid { + t.Errorf("IDName(%s) = nil; want err", n) + } 
// LabelKey will validate a label key field to ensure it follows a particular format.
//
// A label key consists of a prefix (lowercase alphanumeric /w hyphens -- domain name rules) followed by
// a `/` and a suffix consisting of alphanumeric characters and hyphens.
// The entire key may not exceed 255 characters.
func LabelKey(fname, body string) error {
	// Checking length in runes (the prefix rules below are ASCII-only,
	// but the overall limit counts characters).
	r := []rune(body)
	l := len(r)

	if l < 1 {
		return validation.NewFieldError(fname, "must not be empty")
	}
	if l > 255 {
		return validation.NewFieldError(fname, "cannot exceed 255 characters")
	}

	// Split on the FIRST `/` only; anything after it belongs to the suffix.
	parts := strings.SplitN(body, "/", 2)
	if len(parts) != 2 {
		return validation.NewFieldError(fname, "prefix and suffix must be separated by `/`")
	}

	prefix := parts[0]
	suffix := parts[1]

	if len(prefix) < 3 {
		return validation.NewFieldError(fname, "prefix: must be at least 3 characters")
	}

	if len(suffix) == 0 {
		return validation.NewFieldError(fname, "suffix: must not be empty")
	}

	// ASCII range checks: 48-57 = '0'-'9', 97-122 = 'a'-'z'.
	if (prefix[0] < 48 || prefix[0] > 57) && (prefix[0] < 97 || prefix[0] > 122) {
		return validation.NewFieldError(fname, "prefix: must begin with a lower-case letter or number")
	}

	// Character-class check first, to give a more specific message than
	// the structural regex below would.
	idx := strings.IndexFunc(prefix, func(r rune) bool {
		if r == '.' || r == '-' {
			return false
		}
		if r >= 48 && r <= 57 { // numbers
			return false
		}
		if r >= 97 && r <= 122 { // lowercase letters
			return false
		}

		// anything else
		return true
	})
	if idx != -1 {
		return validation.NewFieldError(fname, "prefix: may only contain lowercase letters, numbers, hyphens, or periods")
	}

	// Structural check: dot-separated segments, domain-name style.
	if !labelKeyPrefixRx.MatchString(prefix) {
		return validation.NewFieldError(fname, "prefix: must follow domain name formatting")
	}

	type reasoner interface {
		Reason() string
	}
	// Suffix reuses the LabelValue rules, re-labeled with a "suffix:" prefix.
	// NOTE(review): the unchecked err.(reasoner) assertion panics if
	// LabelValue ever returns a non-FieldError — confirm that invariant holds.
	err := LabelValue(fname, suffix)
	if err != nil {
		return validation.NewFieldError(fname, "suffix: "+err.(reasoner).Reason())
	}

	return nil
}
+// If invalid, a FieldError with the given field name is returned. +func LabelValue(fname, body string) error { + r := []rune(body) + l := len(r) + + if l == 0 { + return nil + } + + if l < 3 { + return validation.NewFieldError(fname, "must be at least 3 characters") + } + + if l > 255 { + return validation.NewFieldError(fname, "cannot exceed 255 characters") + } + + if strings.TrimSpace(body) != body { + return validation.NewFieldError(fname, "must not begin or end with a space") + } + if strings.Contains(body, " ") { + return validation.NewFieldError(fname, "must not contain double spaces") + } + + for _, i := range r { + if !unicode.IsPrint(i) { + return validation.NewFieldError(fname, "must only contain printable characters") + } + } + return nil +} diff --git a/validation/validate/labelvalue_test.go b/validation/validate/labelvalue_test.go new file mode 100644 index 0000000000..00be785384 --- /dev/null +++ b/validation/validate/labelvalue_test.go @@ -0,0 +1,26 @@ +package validate + +import "testing" + +func TestLabelValue(t *testing.T) { + check := func(valid bool, values ...string) { + for _, val := range values { + t.Run(val, func(t *testing.T) { + t.Log("'" + val + "'") + err := LabelValue("", val) + if valid && err != nil { + t.Errorf("got %v; want nil", err) + } else if !valid && err == nil { + t.Errorf("got nil; want err") + } + }) + } + } + + check(true, + "foo", "foo bar", "FooBar", "foo-bar", "foo- 9bar", "", "foo'/bar", "@okay", "&n*#9\\; wowz@ $ \\/yee", + ) + check(false, + " ", " foo", "foo ", "fo", "-", "unprintable"+string('\t'), "unprintable"+string('\n'), "unprintable"+string('\v'), "unprintable"+string('\f'), "unprintable"+string('\r'), + ) +} diff --git a/validation/validate/many.go b/validation/validate/many.go new file mode 100644 index 0000000000..289222967b --- /dev/null +++ b/validation/validate/many.go @@ -0,0 +1,37 @@ +package validate + +import ( + "github.com/target/goalert/validation" +) + +// Many will take multiple input 
error values, filter out nils +// and flatten any nested MultiFieldErrors. +// +// If a given error is not a FieldError, or MultiFieldError it is returned immediately. +// +// If all errs are nil, nil is returned. +// If only one error is present, it is returned. +func Many(errs ...error) error { + flat := make([]validation.FieldError, 0, len(errs)) + + for _, e := range errs { + switch err := e.(type) { + case validation.MultiFieldError: + flat = append(flat, err.FieldErrors()...) + case validation.FieldError: + flat = append(flat, err) + case error: + return e + case nil: + default: + return e + } + } + if len(flat) == 0 { + return nil + } + if len(flat) == 1 { + return flat[0] + } + return validation.NewMultiFieldError(flat) +} diff --git a/validation/validate/many_test.go b/validation/validate/many_test.go new file mode 100644 index 0000000000..a260101069 --- /dev/null +++ b/validation/validate/many_test.go @@ -0,0 +1,36 @@ +package validate + +import ( + "github.com/target/goalert/validation" + "strings" + "testing" +) + +func TestMany(t *testing.T) { + err := validation.NewFieldError("Test", "test error") + + e := Many(err) + if !validation.IsValidationError(e) { + t.Errorf("got %v; want %v", err, e) + } + + e = Many(err, nil) + if e == nil { + t.Error("got nil; want err") + } + + e = Many(err, validation.NewFieldError("Other", "other error")) + if e == nil { + t.Error("got nil; want err") + } + if !strings.Contains(e.Error(), "Test") { + t.Errorf("got '%s'; should contain 'Test'", e.Error()) + } + if !strings.Contains(e.Error(), "Other") { + t.Errorf("got '%s'; should contain 'Other'", e.Error()) + } + + if !validation.IsValidationError(e) { + t.Error("IsValidationError = false; want true") + } +} diff --git a/validation/validate/name.go b/validation/validate/name.go new file mode 100644 index 0000000000..4083b06bee --- /dev/null +++ b/validation/validate/name.go @@ -0,0 +1,79 @@ +package validate + +import ( + "fmt" + 
const upperLimit = 256

// SanitizeName will remove all invalid characters
// and return a valid name (as defined by ValidateName) or an empty string.
//
// It is used in cases where the input is not provided by a user, and should
// be used as-is if possible. An example would be importing a user profile from
// GitHub.
//
// If longer than upperLimit, the extra characters are dropped.
func SanitizeName(name string) string {

	// Strip out anything that's not printable and normalize all
	// whitespace runes to a plain space.
	name = strings.Map(func(r rune) rune {
		if unicode.IsSpace(r) {
			return ' '
		}
		if unicode.IsPrint(r) {
			return r
		}
		return -1
	}, name)

	// Collapse runs of spaces to a single space and trim the ends.
	// strings.Fields handles both; the previous single-pass
	// Replace("  ", " ") left double spaces behind for runs of 3+.
	name = strings.Join(strings.Fields(name), " ")

	r := []rune(name)
	if len(r) < 1 {
		return ""
	}
	if len(r) > upperLimit {
		// Truncation may expose a trailing space; trim again.
		return strings.TrimSpace(string(r[:upperLimit]))
	}

	return name
}
+func Name(fname, name string) error { + r := []rune(name) + l := len(r) + if l < 1 { + return validation.NewFieldError(fname, "must not be empty") + } + if l > upperLimit { + return validation.NewFieldError(fname, fmt.Sprintf("cannot be more than %d characters", upperLimit)) + } + + idx := strings.IndexFunc(name, func(r rune) bool { + if unicode.IsSpace(r) && r != ' ' { + // whitespace other than space (chr 32) + return true + } + return !unicode.IsPrint(r) + }) + if idx != -1 { + return validation.NewFieldError(fname, "can only contain printable characters") + } + + if strings.TrimSpace(name) != name { + return validation.NewFieldError(fname, "must not begin or end with space") + } + + return nil +} diff --git a/validation/validate/name_test.go b/validation/validate/name_test.go new file mode 100644 index 0000000000..e747e153cd --- /dev/null +++ b/validation/validate/name_test.go @@ -0,0 +1,59 @@ +package validate + +import ( + "testing" +) + +func TestSanitizeName(t *testing.T) { + check := func(name, exp string) { + t.Run(name, func(t *testing.T) { + t.Logf("Name='%s'", name) + res := SanitizeName(name) + if res != exp { + t.Errorf("got '%s'; want '%s'", res, exp) + } + }) + } + check(" foo", "foo") + check("okay \b", "okay") + check("okay\n\nthen", "okay then") + check("foo-bar", "foo-bar") +} + +func TestName(t *testing.T) { + check := func(name string, ok bool) { + t.Run(name, func(t *testing.T) { + t.Logf("Name='%s'", name) + err := Name("", name) + if err != nil && ok { + t.Errorf("got %v; want nil", err) + } else if err == nil && !ok { + t.Errorf("got nil; want err") + } + }) + } + + valid := []string{ + "foo", + "bar-Bin", + "baz ok", + "o'hello", + "абаза бызшва (abaza bəzš˚a)", + "𒀝𒅗𒁺𒌑 (Akkadû)", + "客家話 [客家话]", + "Xaat Kíl", + } + for _, n := range valid { + check(n, true) + } + + invalid := []string{ + "", + " a", + "a ", + "test\b", + } + for _, n := range invalid { + check(n, false) + } +} diff --git a/validation/validate/oneof.go 
b/validation/validate/oneof.go new file mode 100644 index 0000000000..6e7a6163f8 --- /dev/null +++ b/validation/validate/oneof.go @@ -0,0 +1,23 @@ +package validate + +import ( + "fmt" + "github.com/target/goalert/validation" + "strings" +) + +// OneOf will check that value is one of the provided options. +func OneOf(fname string, value interface{}, options ...interface{}) error { + for _, o := range options { + if o == value { + return nil + } + } + + msg := []string{} + for _, o := range options { + msg = append(msg, fmt.Sprintf("%v", o)) + } + + return validation.NewFieldError(fname, "must be one of: "+strings.Join(msg, ", ")) +} diff --git a/validation/validate/oneof_test.go b/validation/validate/oneof_test.go new file mode 100644 index 0000000000..8c61b0e0ac --- /dev/null +++ b/validation/validate/oneof_test.go @@ -0,0 +1,21 @@ +package validate + +import ( + "testing" +) + +type testType string + +const ( + testType1 = testType("a") + testType2 = testType("b") + testType3 = testType("c") +) + +func TestOneOf(t *testing.T) { + m := testType1 + err := OneOf("foo", m, testType1, testType2, testType3) + if err != nil { + t.Errorf("err was %+v; want nil", err) + } +} diff --git a/validation/validate/phone.go b/validation/validate/phone.go new file mode 100644 index 0000000000..d8c56e1e3f --- /dev/null +++ b/validation/validate/phone.go @@ -0,0 +1,38 @@ +package validate + +import ( + "fmt" + "github.com/target/goalert/validation" + "regexp" + "strings" + + "github.com/ttacon/libphonenumber" +) + +var phoneRx = regexp.MustCompile(`^\+\d{1,15}$`) + +// Phone will validate a phone number, returning a FieldError +// if invalid. 
+func Phone(fname, phone string) error { + if !strings.HasPrefix(phone, "+") { + return validation.NewFieldError(fname, "must contain country code") + } + if len(phone) < 2 { + return validation.NewFieldError(fname, "must contain 1 or more digits") + } + if len(phone) > 16 { + return validation.NewFieldError(fname, "must contain no more than 15 digits") + } + if !phoneRx.MatchString(phone) { + return validation.NewFieldError(fname, "must only contain digits") + } + p, err := libphonenumber.Parse(phone, "") + if err != nil { + return validation.NewFieldError(fname, fmt.Sprintf("must be a valid number: %s", err.Error())) + } + + if !libphonenumber.IsValidNumber(p) { + return validation.NewFieldError(fname, "must be a valid number") + } + return nil +} diff --git a/validation/validate/phone_test.go b/validation/validate/phone_test.go new file mode 100644 index 0000000000..de133c845c --- /dev/null +++ b/validation/validate/phone_test.go @@ -0,0 +1,41 @@ +package validate + +import ( + "testing" +) + +func TestPhone(t *testing.T) { + check := func(number string, expValid bool) { + name := "valid" + if !expValid { + name = "invalid" + } + t.Run(name, func(t *testing.T) { + err := Phone("", number) + if expValid && err != nil { + t.Errorf("got %v; want %s to be valid (nil err)", err, number) + } else if !expValid && err == nil { + t.Errorf("got nil; want %s to be invalid", number) + } + }) + } + + valid := []string{ + "+17633453456", + "+919632040000", + "+17734562190", + "+916301210000", + } + for _, number := range valid { + check(number, true) + } + + invalid := []string{ + "+10633453456", + "+15555555555", + } + for _, number := range invalid { + check(number, false) + } + +} diff --git a/validation/validate/range.go b/validation/validate/range.go new file mode 100644 index 0000000000..1972aee075 --- /dev/null +++ b/validation/validate/range.go @@ -0,0 +1,22 @@ +package validate + +import ( + "github.com/target/goalert/validation" + "strconv" +) + +// Range will 
ensure a value is between min and max (inclusive). +// A FieldError is returned otherwise. +func Range(fname string, val, min, max int) error { + if min == 0 && val < 0 { + return validation.NewFieldError(fname, "must not be negative") + } + if val < min { + return validation.NewFieldError(fname, "must not be below "+strconv.Itoa(min)) + } + if val > max { + return validation.NewFieldError(fname, "must not be over "+strconv.Itoa(max)) + } + + return nil +} diff --git a/validation/validate/subjectid.go b/validation/validate/subjectid.go new file mode 100644 index 0000000000..79e9199f45 --- /dev/null +++ b/validation/validate/subjectid.go @@ -0,0 +1,33 @@ +package validate + +import ( + "github.com/target/goalert/validation" + "strings" +) + +// SubjectID will validate a given OIDC subject ID. It ensures that the field +// consists of valid ASCII characters, is not empty and +// does not exceed max characters. +// As per http://openid.net/specs/openid-connect-core-1_0.html#IDToken +// For sub : It MUST NOT exceed 255 ASCII characters in length. +func SubjectID(fname, body string) error { + idx := strings.IndexFunc(body, func(r rune) bool { + return r < 32 || r > 126 + }) + if idx != -1 { + // non-ASCII characters exist + return validation.NewFieldError(fname, "must not contain non-ASCII characters") + } + + // Checking length + r := []rune(body) + l := len(r) + + if l < 1 { + return validation.NewFieldError(fname, "must not be empty") + } + if l > 255 { + return validation.NewFieldError(fname, "cannot exceed 255 characters") + } + return nil +} diff --git a/validation/validate/text.go b/validation/validate/text.go new file mode 100644 index 0000000000..9d46eb2d2a --- /dev/null +++ b/validation/validate/text.go @@ -0,0 +1,84 @@ +package validate + +import ( + "github.com/target/goalert/validation" + "strconv" + "strings" + "unicode" +) + +// SanitizeText will sanitize a text body so that it passes the Text validation. 
+func SanitizeText(body string, maxLen int) string { + body = strings.Map(func(r rune) rune { + if unicode.IsPrint(r) || r == '\t' || r == '\n' { + return r + } + if unicode.IsSpace(r) { + return ' ' + } + return -1 + }, body) + + // remove trailing space from lines + lines := strings.Split(body, "\n") + for i, line := range lines { + lines[i] = strings.TrimRight(line, " \t") + } + body = strings.Join(lines, "\n") + + // strip multiple newlines (more than 2) + body = strings.Replace(body, "\n\n\n", "\n\n", -1) + body = strings.TrimSpace(body) + + r := []rune(body) + if maxLen > 0 && len(r) > maxLen { + // truncate the message to fit maxLen if needed + return string(r[:maxLen-1]) + "…" + } + return body +} + +// Text will validate a text body. It ensures that the field +// consists of valid unicode code-points, has at least min characters and +// does not exceed max characters, and that it doesn't begin or end with space. +// +// If body is empty, the input is considered valid, regardless of min value. +func Text(fname, body string, min, max int) error { + if body == "" { + return nil + } + + return RequiredText(fname, body, min, max) +} + +// RequiredText works like Text, but does not allow it to be blank, unless min is set to 0. 
+func RequiredText(fname, body string, min, max int) error { + r := []rune(body) + l := len(r) + + if l == 0 && min == 0 { + return nil + } + + if min > 1 && l < min { + return validation.NewFieldError(fname, "must be at least "+strconv.Itoa(min)+" characters") + } else if min == 1 && l < min { + return validation.NewFieldError(fname, "must not be empty") + } + if l > max { + return validation.NewFieldError(fname, "cannot exceed "+strconv.Itoa(max)+" characters") + } + + for _, c := range r { + if !unicode.IsPrint(c) && c != '\t' && c != '\n' { + return validation.NewFieldError(fname, "only printable characters allowed") + } + } + if unicode.IsSpace(r[0]) { + return validation.NewFieldError(fname, "cannot begin with a space") + } + if unicode.IsSpace(r[l-1]) { + return validation.NewFieldError(fname, "cannot end with a space") + } + return nil +} diff --git a/validation/validate/url.go b/validation/validate/url.go new file mode 100644 index 0000000000..2dfdb96ccc --- /dev/null +++ b/validation/validate/url.go @@ -0,0 +1,31 @@ +package validate + +import ( + "github.com/target/goalert/validation" + "net/url" +) + +// URL will validate a URL, returning a FieldError +// if invalid. +func URL(fname, urlStr string) error { + if _, err := url.Parse(urlStr); err != nil { + return validation.NewFieldError(fname, "must be a valid URL: "+err.Error()) + } + return nil +} + +// AbsoluteURL will validate that a URL is valid and contains +// a scheme and host. 
+func AbsoluteURL(fname, urlStr string) error { + u, err := url.Parse(urlStr) + if err != nil { + return validation.NewFieldError(fname, "must be a valid URL: "+err.Error()) + } + if u.Scheme == "" { + return validation.NewFieldError(fname, "scheme is required for URL") + } + if u.Host == "" { + return validation.NewFieldError(fname, "host is required for URL") + } + return nil +} diff --git a/validation/validate/username.go b/validation/validate/username.go new file mode 100644 index 0000000000..d28498d505 --- /dev/null +++ b/validation/validate/username.go @@ -0,0 +1,29 @@ +package validate + +import "github.com/target/goalert/validation" + +// UserName will validate a username to ensure it is between 3 and 24 characters, +// and only contains lower-case ASCII letters and numbers. +func UserName(fname, name string) error { + b := []byte(name) + l := len(b) + if l < 3 { + return validation.NewFieldError(fname, "must be at least 3 characters") + } + if l > 24 { + return validation.NewFieldError(fname, "cannot be more than 24 characters") + } + + for _, c := range name { + if c >= 'a' && c <= 'z' { + continue + } + if c >= '0' && c <= '9' { + continue + } + + return validation.NewFieldError(fname, "can only contain lower-case letters and digits") + } + + return nil +} diff --git a/validation/validate/uuid.go b/validation/validate/uuid.go new file mode 100644 index 0000000000..e71a1ce3d9 --- /dev/null +++ b/validation/validate/uuid.go @@ -0,0 +1,45 @@ +package validate + +import ( + "database/sql" + "github.com/target/goalert/validation" + "strconv" + + uuid "github.com/satori/go.uuid" +) + +// UUID will validate a UUID, returning a FieldError +// if invalid. +func UUID(fname, u string) error { + _, err := uuid.FromString(u) + if err != nil { + return validation.NewFieldError(fname, "must be a valid UUID: "+err.Error()) + } + return nil +} + +// NullUUID will validate a UUID, unless Null. It returns a FieldError +// if invalid. 
+func NullUUID(fname string, u sql.NullString) error { + if !u.Valid { + return nil + } + return UUID(fname, u.String) +} + +// ManyUUID will validate a slice of strings, checking each +// with the UUID validator. +func ManyUUID(fname string, ids []string, max int) error { + if max != -1 && len(ids) > max { + return validation.NewFieldError(fname, "must not have more than "+strconv.Itoa(max)) + } + errs := make([]error, 0, len(ids)) + var err error + for i, id := range ids { + err = UUID(fname+"["+strconv.Itoa(i)+"]", id) + if err != nil { + errs = append(errs, err) + } + } + return Many(errs...) +} diff --git a/web/.gitignore b/web/.gitignore new file mode 100644 index 0000000000..7982382e56 --- /dev/null +++ b/web/.gitignore @@ -0,0 +1,2 @@ +/inline_data_gen.go + diff --git a/web/bundle.go b/web/bundle.go new file mode 100644 index 0000000000..8885bfc996 --- /dev/null +++ b/web/bundle.go @@ -0,0 +1,3 @@ +package web + +//go:generate go run ../devtools/inliner -pkg $GOPACKAGE ./src/build/* ./src/build/static/* diff --git a/web/handler.go b/web/handler.go new file mode 100644 index 0000000000..d312c6595f --- /dev/null +++ b/web/handler.go @@ -0,0 +1,94 @@ +package web + +import ( + "bytes" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path" + "strings" + "time" + + "github.com/pkg/errors" +) + +// NewHandler creates a new http.Handler that will serve UI files +// using bundled assets or by proxying to urlStr if set. 
+func NewHandler(urlStr string) (http.Handler, error) { + if urlStr == "" { + return newMemoryHandler(), nil + } + u, err := url.Parse(urlStr) + if err != nil { + return nil, errors.Wrap(err, "parse url") + } + + return httputil.NewSingleHostReverseProxy(u), nil +} + +type memoryHandler map[string]File +type memoryFile struct { + *bytes.Reader + file File +} + +func (m memoryHandler) Open(file string) (http.File, error) { + if f, ok := m["src/build"+file]; ok { + return &memoryFile{Reader: bytes.NewReader(f.Data()), file: f}, nil + } + + f, ok := m["src/build/index.html"] + if !ok { + return nil, errors.New("not found") + } + + return &memoryFile{Reader: bytes.NewReader(f.Data()), file: f}, nil +} + +func (m *memoryFile) Close() error { return nil } +func (m *memoryFile) Readdir(int) ([]os.FileInfo, error) { + return nil, errors.New("not a directory") +} +func (m *memoryFile) Stat() (os.FileInfo, error) { + return m, nil +} +func (m *memoryFile) Name() string { return path.Base(m.file.Name) } +func (m *memoryFile) Size() int64 { return int64(len(m.file.Data())) } +func (m *memoryFile) Mode() os.FileMode { return 0644 } +func (m *memoryFile) ModTime() time.Time { + if strings.Contains(m.file.Name, "/static/") { + return time.Time{} + } + + return time.Now() +} +func (m *memoryFile) IsDir() bool { return false } +func (m *memoryFile) Sys() interface{} { return nil } + +func rootFSFix(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.URL.Path == "/" { + // necessary to avoid redirect loop + req.URL.Path = "/alerts" + } + if strings.Contains(req.URL.Path, "/static/") { + w.Header().Add("Cache-Control", "public, immutable, max-age=315360000") + } + + h.ServeHTTP(w, req) + }) +} + +func newMemoryHandler() http.Handler { + if len(Files) > 0 { + // preload + go Files[0].Data() + } + m := make(memoryHandler, len(Files)) + for _, f := range Files { + m[f.Name] = f + } + + return rootFSFix(http.FileServer(m)) +} 
diff --git a/web/inline_types_gen.go b/web/inline_types_gen.go new file mode 100644 index 0000000000..afb11f43ae --- /dev/null +++ b/web/inline_types_gen.go @@ -0,0 +1,10 @@ +// Code generated by inliner DO NOT EDIT. + +package web + +type File struct { + Name string + Data func() []byte +} + +var Files []File diff --git a/web/src/.editorconfig b/web/src/.editorconfig new file mode 100644 index 0000000000..8d76a83275 --- /dev/null +++ b/web/src/.editorconfig @@ -0,0 +1,9 @@ +# editorconfig.org +root = true + +[*] +indent_style=space +indent_size = 2 +end_of_line = lf +charset = utf-8 +insert_final_newline = true \ No newline at end of file diff --git a/web/src/.eslintignore b/web/src/.eslintignore new file mode 100644 index 0000000000..bb91be69d5 --- /dev/null +++ b/web/src/.eslintignore @@ -0,0 +1,3 @@ +/build +/profile.json + diff --git a/web/src/.eslintrc.js b/web/src/.eslintrc.js new file mode 100644 index 0000000000..76f3d9ff00 --- /dev/null +++ b/web/src/.eslintrc.js @@ -0,0 +1,26 @@ +const fs = require('fs') +const path = require('path') + +module.exports = { + parser: 'babel-eslint', + parserOptions: { + ecmaFeatures: { legacyDecorators: true }, + }, + plugins: ['cypress', 'prettier'], + extends: [ + 'standard', + 'standard-jsx', + 'plugin:cypress/recommended', + 'plugin:prettier/recommended', + ], + rules: { + 'prettier/prettier': 'error', + }, + env: { + 'cypress/globals': true, + }, + globals: { + beforeAll: 'readonly', + afterAll: 'readonly', + }, +} diff --git a/web/src/.gitignore b/web/src/.gitignore new file mode 100644 index 0000000000..1f4299964e --- /dev/null +++ b/web/src/.gitignore @@ -0,0 +1,20 @@ +node_modules +__generated__ +.idea/ +/smoketest/output +bundle.js +npm-debug.log +build/ +coverage/ +/profile.json +.env +haters/ +*.iml +yarn-error.log +.DS_Store +/cypress/integration/examples/ +/cypress/videos/ +/cypress/screenshots/ +.cache/ + + diff --git a/web/src/.gqlconfig b/web/src/.gqlconfig new file mode 100644 index 0000000000..02c7548956 
--- /dev/null +++ b/web/src/.gqlconfig @@ -0,0 +1,14 @@ +{ + schema: { + files: "schema.graphql" + }, + query: { + files: [ /* define file paths which you'd like the gql parser to watch and give autocomplete suggestions for */ + { + match: 'app/**/*.js', + parser: ['EmbeddedQueryParser', { startTag: 'graphql`', endTag: '`' }], + isRelay: true, + }, + ], + }, + } diff --git a/web/src/.prettierignore b/web/src/.prettierignore new file mode 100644 index 0000000000..c9e12bdfac --- /dev/null +++ b/web/src/.prettierignore @@ -0,0 +1,3 @@ +__generated__ +/build +/profile.json diff --git a/web/src/.stylelintrc b/web/src/.stylelintrc new file mode 100644 index 0000000000..40db42c668 --- /dev/null +++ b/web/src/.stylelintrc @@ -0,0 +1,3 @@ +{ + "extends": "stylelint-config-standard" +} diff --git a/web/src/app/actions/alerts.js b/web/src/app/actions/alerts.js new file mode 100644 index 0000000000..285ff55cb0 --- /dev/null +++ b/web/src/app/actions/alerts.js @@ -0,0 +1,34 @@ +import { setURLParam, resetURLParams } from './main' + +export const SET_ALERTS_CHECKED = 'SET_ALERTS_CHECKED' +export const SET_ALERTS_ACTION_COMPLETE = 'SET_ALERTS_ACTION_COMPLETE' + +// setAlertsStatusFilter will set the current alert status filter. +// A falsy value will result in the default (active) being set. +export function setAlertsStatusFilter(type) { + return setURLParam('filter', type && type !== 'active' ? type : null) +} + +// setAlertsAllServicesFilter will set the alert list to include all services. +export function setAlertsAllServicesFilter(bool) { + return setURLParam('allServices', bool ? '1' : null) +} + +// resetAlertsFilters will reset all alert list filters to their defaults (NOT including search). 
+export function resetAlertsFilters() { + return resetURLParams('filter', 'allServices') +} + +export function setCheckedAlerts(array) { + return { + type: SET_ALERTS_CHECKED, + payload: array, + } +} + +export function setAlertsActionComplete(bool) { + return { + type: SET_ALERTS_ACTION_COMPLETE, + payload: bool, + } +} diff --git a/web/src/app/actions/auth.js b/web/src/app/actions/auth.js new file mode 100644 index 0000000000..5b81fd93f7 --- /dev/null +++ b/web/src/app/actions/auth.js @@ -0,0 +1,17 @@ +export const AUTH_LOGOUT = 'AUTH_LOGOUT' + +// authLogout will update the user's auth state. +// +// If true is passed as an argument, a request to end +// the current session will be first made to the backend. +// +// AUTH_LOGOUT will be dispatched if, and after, the request completes. +export function authLogout(performFetch = false) { + const payload = { type: AUTH_LOGOUT } + if (!performFetch) return payload + return dispatch => + fetch('/api/v2/identity/logout', { + credentials: 'same-origin', + method: 'POST', + }).then(dispatch(payload)) +} diff --git a/web/src/app/actions/index.js b/web/src/app/actions/index.js new file mode 100644 index 0000000000..5acd02173a --- /dev/null +++ b/web/src/app/actions/index.js @@ -0,0 +1,4 @@ +export * from './alerts' +export * from './main' +export * from './service' +export * from './auth' diff --git a/web/src/app/actions/main.js b/web/src/app/actions/main.js new file mode 100644 index 0000000000..cc59d72a1d --- /dev/null +++ b/web/src/app/actions/main.js @@ -0,0 +1,78 @@ +import { replace } from 'connected-react-router' +export const SET_SHOW_NEW_USER_FORM = 'SET_SHOW_NEW_USER_FORM' + +// resetURLParams will reset all url parameters. +// +// An optional list of specific keys to reset can be passed. 
+export function resetURLParams(...keys) { + return (dispatch, getState) => { + const state = getState() + if (!keys.length) return dispatch(replace(state.router.location.pathname)) + + const q = new URLSearchParams(state.router.location.search) + keys.forEach(key => { + q.delete(key) + }) + if (q.sort) q.sort() + + const search = q.toString() + dispatch( + replace(state.router.location.pathname + (search ? '?' + search : '')), + ) + } +} + +const sanitizeParam = value => { + if (value === true) value = '1' + if (value === false) value = '' + if (!Array.isArray(value)) return value.trim() + + let filtered = value.filter(v => v) + if (filtered.length === 0) return null + + return filtered +} + +// setSearch will set the current search parameter/filter. +export function setSearch(value) { + return setURLParam('search', value || '') +} + +// setURLParam will update the URL parameter with the given name to the provided value. +// +// Falsy values will result in the parameter being cleared. +// The value can also be an array of strings. An empty array will result in the parameter +// being cleared. +export function setURLParam(name, _value, _default) { + return (dispatch, getState) => { + const state = getState() + + if (_value === _default) { + _value = '' + } + const value = sanitizeParam(_value) + + const q = new URLSearchParams(state.router.location.search) + if (Array.isArray(value)) { + q.delete(name) + value.forEach(v => q.append(name, v)) + } else if (value) { + q.set(name, value) + } else { + q.delete(name) + } + if (q.sort) q.sort() + + const search = q.toString() + dispatch( + replace(state.router.location.pathname + (search ? '?' 
+ search : '')), + ) + } +} + +export function setShowNewUserForm(search) { + return { + type: SET_SHOW_NEW_USER_FORM, + payload: search, + } +} diff --git a/web/src/app/actions/service.js b/web/src/app/actions/service.js new file mode 100644 index 0000000000..71add9be36 --- /dev/null +++ b/web/src/app/actions/service.js @@ -0,0 +1,8 @@ +export const SET_SERVICE_SEARCH = 'SET_SERVICE_SEARCH' + +export function setServiceSearch(search) { + return { + type: SET_SERVICE_SEARCH, + payload: search, + } +} diff --git a/web/src/app/admin/AdminConfig.js b/web/src/app/admin/AdminConfig.js new file mode 100644 index 0000000000..e3bdcd518e --- /dev/null +++ b/web/src/app/admin/AdminConfig.js @@ -0,0 +1,174 @@ +import React from 'react' +import Query from '../util/Query' +import Button from '@material-ui/core/Button' +import Card from '@material-ui/core/Card' +import Grid from '@material-ui/core/Grid' +import Typography from '@material-ui/core/Typography' +import gql from 'graphql-tag' +import { chain, startCase, isEmpty } from 'lodash-es' +import AdminConfigSection from './AdminConfigSection' + +import withStyles from '@material-ui/core/styles/withStyles' +import AdminConfirmDialog from './AdminConfirmDialog' +import PageActions from '../util/PageActions' +import { Form } from '../forms' + +const query = gql` + query getConfig { + config(all: true) { + id + description + password + type + value + } + } +` + +const styles = theme => ({ + gridContainer: { + [theme.breakpoints.up('md')]: { + justifyContent: 'center', + }, + }, + gridItem: { + [theme.breakpoints.up('md')]: { + maxWidth: '65%', + }, + }, + groupTitle: { + fontSize: '1.1rem', + }, + saveDisabled: { + color: 'rgba(255, 255, 255, 0.5)', + }, +}) + +@withStyles(styles) +export default class AdminConfig extends React.PureComponent { + state = { + tab: 0, + confirm: false, + value: {}, + } + + updateValue = (id, value) => { + const newVal = { ...this.state.value } + + if (value === null) { + delete newVal[id] + } else { 
+ newVal[id] = value + } + + this.setState({ value: newVal }) + } + + render() { + return ( + this.renderTabs(data.config)} + /> + ) + } + + renderTabs(configValues) { + const groups = chain(configValues) + .map(f => f.id.split('.')[0]) + .uniq() + .value() + + return ( + + + {groups.map((groupID, index) => ( + + + + {startCase(groupID).replace('Git Hub', 'GitHub')} + + + +
+ + this.updateValue(id, value)} + fields={configValues + .filter(f => f.id.split('.')[0] === groups[index]) + .map(f => ({ + id: f.id, + label: chain(f.id.split('.')) + .last() + .startCase() + .value() + .replace(/R Ls\b/, 'RLs'), // fix usages of `URLs` + description: f.description, + password: f.password, + type: f.type, + value: f.value, + }))} + /> + +
+
+
+ ))} +
+ + + + + {this.state.confirm && ( + this.setState({ confirm: false })} + onComplete={() => this.setState({ confirm: false, value: {} })} + /> + )} +
+ ) + } +} diff --git a/web/src/app/admin/AdminConfigSection.js b/web/src/app/admin/AdminConfigSection.js new file mode 100644 index 0000000000..1cee8f7a8f --- /dev/null +++ b/web/src/app/admin/AdminConfigSection.js @@ -0,0 +1,97 @@ +import React from 'react' +import p from 'prop-types' +import List from '@material-ui/core/List' +import ListItem from '@material-ui/core/ListItem' +import ListItemText from '@material-ui/core/ListItemText' +import { FormContainer } from '../forms' +import { defaultTo } from 'lodash-es' +import { + StringInput, + StringListInput, + IntegerInput, + BoolInput, +} from './AdminFieldComponents' +import withStyles from '@material-ui/core/styles/withStyles' + +const components = { + string: StringInput, + stringList: StringListInput, + integer: IntegerInput, + boolean: BoolInput, +} + +const styles = { + listItem: { + // leaves some room around fields without descriptions + // 71px is the height of the checkbox field without w/o a desc + minHeight: '71px', + padding: '1em', + }, + listItemText: { + maxWidth: '50%', + }, + listItemAction: { + width: '50%', + display: 'flex', + justifyContent: 'flex-end', + }, +} + +@withStyles(styles) +export default class AdminConfigSection extends React.PureComponent { + static propTypes = { + fields: p.arrayOf( + p.shape({ + id: p.string.isRequired, + label: p.string.isRequired, + description: p.string, + value: p.string.isRequired, + type: p.oneOf(['string', 'integer', 'stringList', 'boolean']) + .isRequired, + password: p.bool.isRequired, + }), + ), + } + + static defaultProps = { + fields: [], + } + + render() { + const { classes, fields, value } = this.props + + return ( + + + {fields.map((f, idx) => { + const Field = components[f.type] + + return ( + + +
+ + this.props.onChange(f.id, val === f.value ? null : val) + } + /> +
+
+ ) + })} +
+
+ ) + } +} diff --git a/web/src/app/admin/AdminConfirmDialog.js b/web/src/app/admin/AdminConfirmDialog.js new file mode 100644 index 0000000000..dcdf44864b --- /dev/null +++ b/web/src/app/admin/AdminConfirmDialog.js @@ -0,0 +1,103 @@ +import React from 'react' +import p from 'prop-types' +import List from '@material-ui/core/List' +import ListItem from '@material-ui/core/ListItem' +import ListItemText from '@material-ui/core/ListItemText' +import Typography from '@material-ui/core/Typography' +import gql from 'graphql-tag' +import { omit } from 'lodash-es' +import FormDialog from '../dialogs/FormDialog' +import { Mutation } from 'react-apollo' +import { graphql2Client } from '../apollo' +import { nonFieldErrors, fieldErrors } from '../util/errutil' +import Diff from '../util/Diff' + +const mutation = gql` + mutation($input: [ConfigValueInput!]) { + setConfig(input: $input) + } +` + +export default class AdminConfirmDialog extends React.PureComponent { + static propTypes = { + configValues: p.array.isRequired, + fieldValues: p.object.isRequired, + onClose: p.func.isRequired, + onComplete: p.func.isRequired, + } + + render() { + return ( + + {(commit, status) => this.renderConfirm(commit, status)} + + ) + } + + renderConfirm(commit, { error }) { + const changeKeys = Object.keys(this.props.fieldValues) + const changes = this.props.configValues + .filter(v => changeKeys.includes(v.id)) + .map(orig => ({ + id: orig.id, + oldValue: orig.value, + value: this.props.fieldValues[orig.id], + type: orig.type, + })) + + return ( + 1 ? 
's' : ''}?`} + onClose={this.props.onClose} + onSubmit={() => + commit({ + variables: { + input: changes.map(c => omit(c, ['oldValue', 'type'])), + }, + }) + } + errors={nonFieldErrors(error).concat( + fieldErrors(error).map(e => ({ + message: `${e.field}: ${e.message}`, + })), + )} + form={ + + {changes.map(c => ( + + + } + > + {c.id} + + + ))} + + } + /> + ) + } +} diff --git a/web/src/app/admin/AdminFieldComponents.js b/web/src/app/admin/AdminFieldComponents.js new file mode 100644 index 0000000000..d31b26d688 --- /dev/null +++ b/web/src/app/admin/AdminFieldComponents.js @@ -0,0 +1,95 @@ +import React from 'react' +import p from 'prop-types' +import Grid from '@material-ui/core/Grid' +import IconButton from '@material-ui/core/IconButton' +import Input from '@material-ui/core/Input' +import InputAdornment from '@material-ui/core/InputAdornment' +import Switch from '@material-ui/core/Switch' +import Visibility from '@material-ui/icons/Visibility' +import VisibilityOff from '@material-ui/icons/VisibilityOff' + +export const StringListInput = props => { + const value = props.value ? props.value.split('\n').concat('') : [''] + return ( + + {value.map((val, idx) => ( + + + props.onChange( + value + .slice(0, idx) + .concat(newVal, ...value.slice(idx + 1)) + .filter(v => v) + .join('\n'), + ) + } + autoComplete='new-password' + password={props.password} + /> + + ))} + + ) +} + +export class StringInput extends React.PureComponent { + static propTypes = { + password: p.bool, + } + + state = { + showPassword: false, + } + + render() { + const { onChange, password, ...rest } = this.props + + return ( + onChange(e.target.value)} + endAdornment={this.renderPasswordAdornment()} + {...rest} + /> + ) + } + + renderPasswordAdornment() { + if (!this.props.password) return null + + return ( + + + this.setState({ showPassword: !this.state.showPassword }) + } + > + {this.state.showPassword ? 
: } + + + ) + } +} + +export const IntegerInput = props => ( + props.onChange(e.target.value)} + /> +) + +export const BoolInput = props => ( + props.onChange(e.target.checked ? 'true' : 'false')} + /> +) diff --git a/web/src/app/admin/AdminRouter.js b/web/src/app/admin/AdminRouter.js new file mode 100644 index 0000000000..6a8c83a492 --- /dev/null +++ b/web/src/app/admin/AdminRouter.js @@ -0,0 +1,22 @@ +import React from 'react' +import { Switch, Route } from 'react-router-dom' +import { GenericError, PageNotFound } from '../error-pages/Errors' +import AdminConfig from './AdminConfig' +import RequireConfig from '../util/RequireConfig' + +export const AdminRouter = () => ( + + ( + }> + + + )} + /> + + +) + +export default AdminRouter diff --git a/web/src/app/admin/index.js b/web/src/app/admin/index.js new file mode 100644 index 0000000000..d5c0d99d3a --- /dev/null +++ b/web/src/app/admin/index.js @@ -0,0 +1,4 @@ +export * from './AdminPage' +export { + default as CreateTraceTokenMutation, +} from './mutations/CreateTraceTokenMutation' diff --git a/web/src/app/alerts/AlertRouter.js b/web/src/app/alerts/AlertRouter.js new file mode 100644 index 0000000000..703b7e0204 --- /dev/null +++ b/web/src/app/alerts/AlertRouter.js @@ -0,0 +1,18 @@ +import React from 'react' +import { Switch, Route } from 'react-router-dom' + +import AlertList from './pages/AlertsIndexPage' +import AlertDetails from './pages/AlertDetailPage' +import { PageNotFound } from '../error-pages/Errors' + +export default class AlertRouter extends React.PureComponent { + render() { + return ( + + + + + + ) + } +} diff --git a/web/src/app/alerts/components/AlertDetails.js b/web/src/app/alerts/components/AlertDetails.js new file mode 100644 index 0000000000..3e4c1976a8 --- /dev/null +++ b/web/src/app/alerts/components/AlertDetails.js @@ -0,0 +1,474 @@ +import React, { Component } from 'react' +import p from 'prop-types' +import Card from '@material-ui/core/Card' +import CardContent from 
'@material-ui/core/CardContent' +import Divider from '@material-ui/core/Divider' +import FormControlLabel from '@material-ui/core/FormControlLabel' +import Grid from '@material-ui/core/Grid' +import Hidden from '@material-ui/core/Hidden' +import List from '@material-ui/core/List' +import ListItem from '@material-ui/core/ListItem' +import ListItemText from '@material-ui/core/ListItemText' +import Switch from '@material-ui/core/Switch' +import Table from '@material-ui/core/Table' +import TableBody from '@material-ui/core/TableBody' +import TableCell from '@material-ui/core/TableCell' +import TableHead from '@material-ui/core/TableHead' +import TableRow from '@material-ui/core/TableRow' +import Typography from '@material-ui/core/Typography' +import withStyles from '@material-ui/core/styles/withStyles' +import isFullScreen from '@material-ui/core/withMobileDialog' +import moment from 'moment' +import Countdown from 'react-count-down' +import { Link } from 'react-router-dom' +import { ScheduleLink, ServiceLink, UserLink } from '../../links' +import { styles } from '../../styles/materialStyles' +import Options from '../../util/Options' +import gql from 'graphql-tag' +import PageActions from '../../util/PageActions' +import Markdown from '../../util/Markdown' + +const localStorage = window.localStorage +const exactTimesKey = 'show_exact_times' + +const sortTime = (a, b) => { + const ma = moment(a.timestamp) + const mb = moment(b.timestamp) + if (ma.isSame(mb)) return 0 + return ma.isAfter(mb) ? 
-1 : 1 +} + +@withStyles(styles) +@isFullScreen() +export default class AlertDetails extends Component { + static propTypes = { + loading: p.bool, + error: p.shape({ message: p.string }), + } + + constructor(props) { + super(props) + + // localstorage stores true/false as a string; convert to a bool + // default to true if localstorage is not set + let showExactTimes = localStorage.getItem(exactTimesKey) || false + if (typeof showExactTimes !== 'boolean') { + showExactTimes = showExactTimes === 'true' + } + + this.state = { + fullDescription: false, + loading: '', + escalateWaiting: false, + showExactTimes, + } + } + + /* + * Update state and local storage with new boolean value + * telling whether or not the show exact times toggle is active + */ + toggleExactTimes = () => { + const newVal = !this.state.showExactTimes + this.setState({ + showExactTimes: newVal, + }) + localStorage.setItem(exactTimesKey, newVal.toString()) + } + + renderAlertLogEvents() { + let logs = this.props.data.logs_2.slice(0).sort(sortTime) + + if (logs.length === 0) { + return ( +
+ + + + +
+ ) + } + + return logs.map((log, index) => { + let alertTimeStamp = moment(log.timestamp) + .local() + .calendar() + if (this.state.showExactTimes) { + alertTimeStamp = moment(log.timestamp) + .local() + .format('MMM Do YYYY, h:mm:ss a') + } + return ( +
+ + + + +
+ ) + }) + } + + renderAlertLogs() { + return ( + +
+ + Event Log + + + } + label='Full Timestamps' + style={{ padding: '0.5em 0.5em 0 0' }} + /> +
+ + {this.renderAlertLogEvents()} + +
+ ) + } + + renderUsers(users, stepID) { + return users.map((user, i) => { + const sep = i === 0 ? '' : ', ' + return ( + + {sep} + {UserLink(user)} + + ) + }) + } + + renderSchedules(schedules, stepID) { + return schedules.map((schedule, i) => { + const sep = i === 0 ? '' : ', ' + return ( + + {sep} + {ScheduleLink(schedule)} + + ) + }) + } + + /* + * Returns properties from the escalation policy + * for easier use in functions. + */ + epsHelper() { + const eps = this.props.data.escalation_policy_snapshot + const alert = this.props.data + return { + repeat: eps.repeat, + numSteps: eps.steps.length, + steps: eps.steps, + status: alert.status.toLowerCase(), + currentLevel: eps.current_level, + lastEscalation: eps.last_escalation, + } + } + + canAutoEscalate() { + const { repeat, numSteps, status, currentLevel } = this.epsHelper() + if (status !== 'unacknowledged') return false + if (repeat === -1) return true + return currentLevel + 1 < numSteps * (repeat + 1) + } + + /* + * Renders a timer that counts down time until the next escalation + */ + renderTimer(index, delayMinutes) { + const { currentLevel, numSteps, lastEscalation } = this.epsHelper() + const prevEscalation = new Date(lastEscalation) + + if (!this.canAutoEscalate()) { + return
{delayMinutes} minutes
+ } + + if (currentLevel % numSteps === index) { + return ( + + ) + } + + return
+ } + + renderEscalationPolicySteps() { + const { steps, status, currentLevel } = this.epsHelper() + return steps.map((step, index) => { + const { schedules, delay_minutes: delayMinutes, users } = step + + let usersRender + if (users.length > 0) { + usersRender =
Users: {this.renderUsers(users, step.id)}
+ } + + let schedulesRender + if (schedules.length > 0) { + schedulesRender = ( +
Schedules: {this.renderSchedules(schedules, step.id)}
+ ) + } + + let className + if (status !== 'closed' && currentLevel % steps.length === index) { + className = this.props.classes.highlightRow + } + return ( + + Step #{index + 1} + + {usersRender} + {schedulesRender} + + {this.renderTimer(index, delayMinutes)} + + ) + }) + } + + renderEscalationPolicy() { + const alert = this.props.data + + return ( + + + + + Escalation Policy + + + + + + + + Step + Alert + + {this.canAutoEscalate() + ? 'Time Until Next Escalation' + : 'Time Between Escalations'} + + + + {this.renderEscalationPolicySteps()} +
+
+
+ ) + } + + renderAlertDetails() { + const alert = this.props.data + let details = (alert.details || '').trim() + if (!details) return null + + if (!this.state.fullDescription && details.length > 1000) { + details = details.slice(0, 1000).trim() + ' ...' + } + + let expandTextAction = null + if (details.length > 1000) { + let text = 'Show Less' + + if (!this.state.fullDescription) { + text = 'Show More' + } + + expandTextAction = ( + { + this.setState({ fullDescription: !this.state.fullDescription }) + }} + style={{ + display: 'flex', + alignItems: 'center', + cursor: 'pointer', + justifyContent: 'center', + textAlign: 'center', + paddingTop: '1em', + }} + > + {text} + + ) + } + + return ( + + + + Details + + {expandTextAction} + + + + ) + } + + /* + * Options to show for alert details menu + */ + getMenuOptions = () => { + const { + escalation_level: escalationLevel, + number: id, + status, + } = this.props.data + if (status.toLowerCase() === 'closed') return [] // no options to show if alert is already closed + + const updateStatusMutation = gql` + mutation UpdateAlertStatusMutation($input: UpdateAlertStatusInput!) { + updateAlertStatus(input: $input) { + id + status: status_2 + logs_2 { + event + message + timestamp + } + } + } + ` + + let options = [] + const ack = { + text: 'Acknowledge', + mutation: { + query: updateStatusMutation, + variables: { + input: { + id, + status_2: 'acknowledged', + }, + }, + }, + } + + const esc = { + text: 'Escalate', + mutation: { + query: gql` + mutation EscalateAlertMutation($input: EscalateAlertInput!) 
{ + escalateAlert(input: $input) { + id + status: status_2 + logs_2 { + event + message + timestamp + } + } + } + `, + variables: { + input: { + id, + current_escalation_level: escalationLevel, + }, + }, + }, + } + + const close = { + text: 'Close', + mutation: { + query: updateStatusMutation, + variables: { + input: { + id, + status_2: 'closed', + }, + }, + }, + } + + options.push(esc) + options.push(close) + if (status.toLowerCase() === 'unacknowledged') options.push(ack) + return options + } + + getCardClassName = () => { + const { classes, fullScreen } = this.props + return fullScreen ? classes.cardFull : classes.card + } + + render() { + const { classes, data: alert } = this.props + + const options = this.getMenuOptions() + const optionsMenu = + options.length > 0 ? : null + + return ( + + {optionsMenu} + + + + + + + {ServiceLink(alert.service)} + + + + + {alert.number}: {alert.summary} + + + + + {alert.status.toUpperCase()} + + + + + + + {this.renderAlertDetails()} + + + {this.renderEscalationPolicy()} + + + + {this.renderAlertLogs()} + + + ) + } +} diff --git a/web/src/app/alerts/components/AlertForm.js b/web/src/app/alerts/components/AlertForm.js new file mode 100644 index 0000000000..32b2e5ef74 --- /dev/null +++ b/web/src/app/alerts/components/AlertForm.js @@ -0,0 +1,223 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import FormControl from '@material-ui/core/FormControl' +import FormHelperText from '@material-ui/core/FormHelperText' +import Grid from '@material-ui/core/Grid' +import TextField from '@material-ui/core/TextField' +import gql from 'graphql-tag' +import { ServiceSelect } from '../../selection' +import ApolloFormDialog from '../../dialogs/components/ApolloFormDialog' + +const mutation = gql` + mutation CreateAlertMutation($input: CreateAlertInput!) 
{ + createAlert(input: $input) { + number: _id + id + status: status_2 + created_at + escalation_level + description + details + summary + service_id + source + assignments { + id + name + } + service { + id + name + escalation_policy_id + } + logs_2 { + event + message + timestamp + } + escalation_policy_snapshot { + repeat + current_level + last_escalation + steps { + delay_minutes + users { + id + name + } + schedules { + id + name + } + } + } + } + } +` + +export default class AlertForm extends Component { + static contextTypes = { + router: p.object, + } + + constructor(props) { + super(props) + + let sid + if (props.service) sid = props.service.id + + this.state = { + sid, + errorMessage: '', + submitted: false, + summary: '', + details: '', + readOnly: false, + } + } + + shouldSubmit = () => { + this.setState({ submitted: true }) + + const shouldSubmit = !this.validateForm() + if (shouldSubmit) { + this.setState({ readOnly: true }) + return true + } + + return false + } + + onSuccess = (cache, data) => { + const alert = data.createAlert + + // Get created alert ID back from the promise from mutation + const alertPage = '/alerts/' + encodeURIComponent(alert.number) + + // Redirect to created alert's page + this.context.router.history.push(alertPage) + } + + getVariables = () => { + return { + input: { + service_id: this.state.sid, + description: + this.state.summary.trim() + '\n' + this.state.details.trim(), + }, + } + } + + validateForm() { + return this.validateService(true) || this.validateSummary(true) + } + + validateService(submitted = this.state.submitted) { + if (!submitted) return '' + if (!this.state.sid) return 'A service must be selected' + return '' + } + + validateSummary(submitted = this.state.submitted) { + if (!submitted) return '' + if (this.state.summary.length < 2) { + return 'Summary must be at least 2 characters' + } + return '' + } + + renderServiceField() { + if (this.props.service) { + return ( + + ) + } + + return ( + this.setState({ 
sid: value })} + label='Select Service' + name='service' + errorMessage={this.validateService()} + disabled={this.state.readOnly} + /> + ) + } + + resetForm = () => { + const { service } = this.props + + // Reset the form when we open it. + this.setState({ + sid: service ? service.id : null, + errorMessage: '', + submitted: false, + summary: '', + details: '', + readOnly: false, + }) + } + + render() { + const { open } = this.props + + const formFields = ( + + + + this.setState({ summary: event.target.value })} + /> + {this.validateSummary()} + + + + + this.setState({ details: event.target.value })} + /> + + + + {this.renderServiceField()} + + + ) + + return ( + this.setState({ readOnly: false })} + fields={formFields} + getVariables={this.getVariables} + mutation={mutation} + onRequestClose={this.props.handleRequestClose} + resetForm={this.resetForm} + open={open} + shouldSubmit={this.shouldSubmit} + onSuccess={this.onSuccess} + title='Create New Alert' + /> + ) + } +} diff --git a/web/src/app/alerts/components/AlertsList.js b/web/src/app/alerts/components/AlertsList.js new file mode 100644 index 0000000000..e4a2f50525 --- /dev/null +++ b/web/src/app/alerts/components/AlertsList.js @@ -0,0 +1,370 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import Card from '@material-ui/core/Card' +import InfoIcon from '@material-ui/icons/Info' +import List from '@material-ui/core/List' +import Grid from '@material-ui/core/Grid' +import ListItem from '@material-ui/core/ListItem' +import ListItemText from '@material-ui/core/ListItemText' +import Snackbar from '@material-ui/core/Snackbar' +import SnackbarContent from '@material-ui/core/SnackbarContent' +import withStyles from '@material-ui/core/styles/withStyles' +import isFullScreen from '@material-ui/core/withMobileDialog' +import { debounce } from 'lodash-es' +import { graphql } from 'react-apollo' +import InfiniteScroll from 'react-infinite-scroll-component' +import { styles as 
globalStyles } from '../../styles/materialStyles' +import { getParameterByName } from '../../util/query_param' +import CreateAlertFab from './CreateAlertFab' +import AlertsListDataWrapper from './AlertsListDataWrapper' +import { alertsQuery } from '../queries/AlertsListQuery' +import { connect } from 'react-redux' +import CheckedAlertsFormControl from './CheckedAlertsFormControl' +import { GenericError } from '../../error-pages' +import { withRouter } from 'react-router-dom' +import Hidden from '@material-ui/core/Hidden' +import { + searchSelector, + alertAllServicesSelector, + alertFilterSelector, +} from '../../selectors/url' +import AlertsListControls from '../components/AlertsListControls' + +const LIMIT = 25 + +const styles = theme => ({ + ...globalStyles(theme), + snackbar: { + backgroundColor: theme.palette.primary['500'], + height: '6.75em', + width: '20em', // only triggers on desktop, 100% on mobile devices + }, + snackbarIcon: { + fontSize: 20, + opacity: 0.9, + marginRight: theme.spacing.unit, + }, + snackbarMessage: { + display: 'flex', + alignItems: 'center', + }, +}) + +const filterToOmit = filter => { + switch (filter) { + case 'acknowledged': + return { omit_active: false, omit_triggered: true, omit_closed: true } + case 'unacknowledged': + return { omit_active: true, omit_triggered: false, omit_closed: true } + case 'closed': + return { omit_active: true, omit_triggered: true, omit_closed: false } + case 'all': + return { omit_active: false, omit_triggered: false, omit_closed: false } + } + + // active (default) + return { omit_active: false, omit_triggered: false, omit_closed: true } +} + +/* + * Returns true if the first specified array contains all elements + * from the second one. False otherwise. 
+ */ +function arrayContainsArray(superset, subset) { + return subset.every(value => superset.indexOf(value) >= 0) +} + +const mapStateToProps = state => { + return { + actionComplete: state.alerts.actionComplete, + allServices: alertAllServicesSelector(state), + isFirstLogin: state.main.isFirstLogin, + searchParam: searchSelector(state), + filter: alertFilterSelector(state), + } +} + +@connect(mapStateToProps) // must connect to redux before calling graphql +@graphql(alertsQuery, { + options: props => { + return { + variables: { + favorite_services_only: props.serviceID ? false : !props.allServices, + service_id: props.serviceID || '', + search: props.searchParam, + sort_desc: getParameterByName('sortDesc') === 'true', + limit: LIMIT, + offset: 0, + ...filterToOmit(props.filter), + sort_by: getParameterByName('sortBy') || 'status', // status, id, created_at, summary, or service + favorites_first: true, + favorites_only: true, + services_limit: 1, + services_search: '', + }, + notifyOnNetworkStatusChange: true, // updates data.loading bool for refetching, and fetching more + fetchPolicy: 'cache-and-network', + } + }, + props: props => { + return { + data: props.data, + loadMore: queryVariables => { + return props.data.fetchMore({ + variables: queryVariables, + updateQuery(previousResult, { fetchMoreResult }) { + if (!fetchMoreResult) return previousResult + + const p = previousResult.alerts2 // previous + const n = fetchMoreResult.alerts2 // next + + const pIDs = p.items.map(i => i.id) + const nIDs = n.items.map(i => i.id) + + // return previous result if the whole next result is duplicate data + // IDs will always be unique + if (arrayContainsArray(pIDs, nIDs)) return previousResult + + return Object.assign({}, previousResult, { + // append the new alerts results to the old one + alerts2: { + __typename: n.__typename, + items: [...p.items, ...n.items], + total_count: n.total_count, + }, + }) + }, + }) + }, + } + }, +}) +@withStyles(styles) +@isFullScreen() 
+@withRouter +export default class AlertsList extends Component { + static contextTypes = { + router: p.object, + } + + state = { + snackbarOpen: true, // always open unless clicked away from or there are services present + } + + // TODO: Temp fix until apollo cache updated after all relevant mutations affecting this component + componentDidMount() { + this.refetch() + } + + componentWillUnmount() { + this.refetch.cancel() + } + + /* + * Display current data until new data loads in when refetching + * i.e. only show loading placeholders on first page load + */ + shouldComponentUpdate(nextProps, nextState, nextContext) { + return !( + this.props.data.alerts2 && + (nextProps.data.networkStatus === 2 || nextProps.data.networkStatus === 4) + ) + } + + handleCloseSnackbar = (event, reason) => { + if (reason === 'clickaway') { + this.setState({ snackbarOpen: false }) + } + } + + /* + * Refetch from scratch after a filter is changed + */ + refetch = debounce(extraProps => { + const offset = 0 + this.props.data.refetch(this.getQueryData(offset, extraProps)) + }, 100) + + getQueryData = (offset, extraProps) => { + return { + favorite_services_only: this.props.serviceID + ? false + : !this.props.allServices, + service_id: this.props.serviceID || '', // TODO: adding the || "" ensures we get the same cache key as elsewhere, let's find a better way to normalize... 
+ search: this.props.searchParam, + sort_desc: getParameterByName('sortDesc') === 'true', + offset: offset, + limit: LIMIT, + ...filterToOmit(this.props.filter), + sort_by: getParameterByName('sortBy') || 'status', + favorites_first: true, + favorites_only: true, + services_limit: 1, + services_search: '', + ...extraProps, + } + } + + renderLoading = () => { + const style = { + color: 'lightgrey', + background: 'lightgrey', + height: '0.875em', + } + + let loadingItems = [] + for (let i = 0; i < 5; i++) { + loadingItems.push( + + + + + , + ) + } + + return loadingItems + } + + renderError = data => { + return ( + + + + ) + } + + renderNoResults = () => { + return ( + + + + ) + } + + render() { + const { + actionComplete, + allServices, + classes, + data, + fullScreen, + onServicePage, + isFirstLogin, + loadMore, + service, + onServicesPage, + } = this.props + const { snackbarOpen } = this.state + + // status 2: setting variables (occurs when refetching) + // status 4: refetching + const net = data.networkStatus + const isLoading = net === 2 || net === 4 + + let offset = 0 + let len = 0 + let hasMore = true + + if (data.alerts2) { + offset = len = data.alerts2.items.length + hasMore = offset < data.alerts2.total_count && !data.error + if (len <= LIMIT) this.props.data.startPolling(3500) + else this.props.data.stopPolling() + } + + // Scrollable infinite list should be sorted by id, can be adjusted with status filters/search in appbar + const noFavorites = + data.services2 && + data.services2.items && + data.services2.items.length === 0 && + !onServicesPage + + let content = null + if (data.error) content = this.renderError(data) + else if (isLoading) content = this.renderLoading() + else if (data.alerts2 && !isLoading && !data.alerts2.items.length) + content = this.renderNoResults() + + let dataToShow = data.alerts2 ? 
data.alerts2.items : [] + if (!content) { + content = dataToShow.map(alert => ( + + )) + } + + const showFavoritesWarning = + snackbarOpen && + noFavorites && + !allServices && + !onServicesPage && + !isFirstLogin + return [ + , + + + + It looks like you have no favorited services. Visit your most used + services to set them as a favorite, or enable the filter to view + alerts for all services. + + } + /> + , + , + + + + + + + + + loadMore(this.getQueryData(offset))} + dataLength={len} + hasMore={hasMore} + loader={null} + scrollThreshold={(len - 20) / len} + style={{ overflow: 'hidden' }} + > + {content} + + + + + , + ] + } +} diff --git a/web/src/app/alerts/components/AlertsListControls.js b/web/src/app/alerts/components/AlertsListControls.js new file mode 100644 index 0000000000..cdaf1802aa --- /dev/null +++ b/web/src/app/alerts/components/AlertsListControls.js @@ -0,0 +1,40 @@ +import React from 'react' +import Tabs from '@material-ui/core/Tabs' +import Tab from '@material-ui/core/Tab' +import { connect } from 'react-redux' +import { setAlertsStatusFilter } from '../../actions' +import { alertFilterSelector } from '../../selectors/url' + +const mapStateToProps = state => ({ + filter: alertFilterSelector(state), +}) + +const mapDispatchToProps = dispatch => ({ + setAlertsStatusFilter: value => dispatch(setAlertsStatusFilter(value)), +}) + +const tabs = ['active', 'unacknowledged', 'acknowledged', 'closed', 'all'] + +@connect( + mapStateToProps, + mapDispatchToProps, +) +export default class AlertsListControls extends React.PureComponent { + render() { + return ( + this.props.setAlertsStatusFilter(tabs[idx])} + centered + indicatorColor='primary' + textColor='primary' + > + + + + + + + ) + } +} diff --git a/web/src/app/alerts/components/AlertsListDataWrapper.js b/web/src/app/alerts/components/AlertsListDataWrapper.js new file mode 100644 index 0000000000..4159dddacd --- /dev/null +++ b/web/src/app/alerts/components/AlertsListDataWrapper.js @@ -0,0 +1,169 @@ 
+import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import Checkbox from '@material-ui/core/Checkbox' +import ListItem from '@material-ui/core/ListItem' +import ListItemText from '@material-ui/core/ListItemText' +import ListItemSecondaryAction from '@material-ui/core/ListItemSecondaryAction' +import Typography from '@material-ui/core/Typography' +import moment from 'moment' +import withStyles from '@material-ui/core/styles/withStyles' +import { connect } from 'react-redux' +import { setCheckedAlerts } from '../../actions' +import { bindActionCreators } from 'redux' +import statusStyles from '../../util/statusStyles' +import { alertFilterSelector } from '../../selectors' + +const styles = { + checkBox: { + marginRight: '27px', + padding: '4px', // match form control padding + }, + iconButton: { + width: 'fit-content', + }, + listItem: { + width: '100%', + }, + ...statusStyles, +} + +const mapStateToProps = state => ({ + allChecked: state.alerts.allChecked, + checkedAlerts: state.alerts.checkedAlerts, + filter: alertFilterSelector(state), +}) + +const mapDispatchToProps = dispatch => + bindActionCreators( + { + setCheckedAlerts, + }, + dispatch, + ) + +@withStyles(styles) +@connect( + mapStateToProps, + mapDispatchToProps, +) +export default class AlertsListDataWrapper extends Component { + static contextTypes = { + router: p.object, + } + + static propTypes = { + alert: p.object.isRequired, + onServicePage: p.bool, + } + + componentWillMount() { + moment.updateLocale('en', { + relativeTime: { + future: 'in %s', + past: '%s ago', + s: '< 1m', + m: '1m', + mm: '%dm', + h: '1h', + hh: '%dh', + d: '1d', + dd: '%dd', + M: '1mo', + MM: '%dmo', + y: '1y', + yy: '%dy', + }, + }) + } + + componentWillReceiveProps(nextProps, nextContext) { + const { allChecked, checkedAlerts, setCheckedAlerts } = this.props + const { alert, allChecked: nextChecked } = nextProps + + if (!allChecked && nextChecked) { + setCheckedAlerts([...checkedAlerts, 
alert.number]) + } + } + + toggleChecked = id => { + const { checkedAlerts: _checkedAlerts, setCheckedAlerts } = this.props + const checkedAlerts = _checkedAlerts.slice() // copy array + + if (checkedAlerts.includes(id)) { + const idx = checkedAlerts.indexOf(id) + checkedAlerts.splice(idx, 1) // removes at index + setCheckedAlerts(checkedAlerts) + } else { + checkedAlerts.push(id) + setCheckedAlerts(checkedAlerts) + } + } + + render() { + const { alert, checkedAlerts, classes, onServicePage } = this.props + + const checkbox = ( + this.toggleChecked(alert.number)} + /> + ) + + let statusClass + switch (alert.status.toLowerCase()) { + case 'unacknowledged': + statusClass = classes.statusError + break + case 'acknowledged': + statusClass = classes.statusWarning + break + default: + statusClass = classes.noStatus + break + } + + return ( + + {checkbox} +
+ this.context.router.history.push(`/alerts/${alert.number}`) + } + > + + + {alert.number}: + {alert.status.toUpperCase()} + + {onServicePage ? null : ( + {alert.service.name} + )} + + {alert.summary} + + + + + + {moment(alert.created_at) + .local() + .fromNow()} + + + +
+
+ ) + } +} diff --git a/web/src/app/alerts/components/AlertsListFilter.js b/web/src/app/alerts/components/AlertsListFilter.js new file mode 100644 index 0000000000..e1d42f2fae --- /dev/null +++ b/web/src/app/alerts/components/AlertsListFilter.js @@ -0,0 +1,213 @@ +import React, { Component } from 'react' +import p from 'prop-types' +import Button from '@material-ui/core/Button' +import IconButton from '@material-ui/core/IconButton' +import Popover from '@material-ui/core/Popover' +import FilterList from '@material-ui/icons/FilterList' +import Hidden from '@material-ui/core/Hidden' +import SwipeableDrawer from '@material-ui/core/SwipeableDrawer' +import Switch from '@material-ui/core/Switch' +import Grid from '@material-ui/core/Grid' +import { withStyles } from '@material-ui/core/styles' +import { styles as globalStyles } from '../../styles/materialStyles' +import Radio from '@material-ui/core/Radio' +import RadioGroup from '@material-ui/core/RadioGroup' +import FormControlLabel from '@material-ui/core/FormControlLabel' +import FormControl from '@material-ui/core/FormControl' +import withWidth, { isWidthUp } from '@material-ui/core/withWidth' +import classnames from 'classnames' +import { connect } from 'react-redux' +import { + resetAlertsFilters, + setAlertsStatusFilter, + setAlertsAllServicesFilter, +} from '../../actions' +import { alertAllServicesSelector, alertFilterSelector } from '../../selectors' + +const styles = theme => ({ + ...globalStyles(theme), + drawer: { + width: 'fit-content', // width placed on mobile drawer + }, + grid: { + margin: '0.5em', // margin in grid container + }, + gridItem: { + display: 'flex', + alignItems: 'center', // aligns text with toggles + }, + formControl: { + width: '100%', // date pickers full width + }, + popover: { + width: '17em', // width placed on desktop popover + }, +}) + +const mapStateToProps = state => ({ + allServices: alertAllServicesSelector(state), + filter: alertFilterSelector(state), +}) + +const 
mapDispatchToProps = dispatch => ({ + resetAll: () => dispatch(resetAlertsFilters()), // don't reset search param + setFilter: value => dispatch(setAlertsStatusFilter(value)), + setAllServices: value => dispatch(setAlertsAllServicesFilter(value)), +}) + +@withStyles(styles) +@withWidth() +@connect( + mapStateToProps, + mapDispatchToProps, +) +export default class AlertsListFilter extends Component { + static propTypes = { + serviceID: p.string, + allServices: p.bool.isRequired, + filter: p.string.isRequired, + } + state = { + show: false, + anchorEl: null, // element in which filters form under + } + + handleOpenFilters = event => { + this.setState({ + anchorEl: event.currentTarget, + show: true, + }) + } + + handleCloseFilters = () => { + this.setState({ + show: false, + }) + } + + renderFilters = () => { + const { allServices, classes, filter, serviceID: sid, width } = this.props + const { resetAll, setFilter, setAllServices } = this.props + + // grabs class for width depending on breakpoints (md or higher uses popover width) + const widthClass = isWidthUp('md', width) ? 
classes.popover : classes.drawer + const gridClasses = classnames(classes.grid, widthClass) + + let favoritesFilter = null + if (!sid) { + favoritesFilter = ( + setAllServices(!allServices)} + /> + } + label='Include All Services' + /> + ) + } + + const content = ( + + + + {favoritesFilter} + setFilter(e.target.value)} + > + } + label='Active' + /> + } + label='Unacknowledged' + /> + } + label='Acknowledged' + /> + } + label='Closed' + /> + } + label='All' + /> + + + + + + + + + ) + + // renders a popover on desktop, and a swipeable drawer on mobile devices + return [ + + this.state.anchorEl} + open={!!this.state.anchorEl && this.state.show} + onClose={this.handleCloseFilters} + anchorOrigin={{ + vertical: 'bottom', + horizontal: 'right', + }} + transformOrigin={{ + vertical: 'top', + horizontal: 'right', + }} + > + {content} + + , + + + {content} + + , + ] + } + + /* + * Finds the parent toolbar DOM node and appends the options + * element to that node (after all the toolbar's children + * are done being rendered) + */ + render() { + return ( +
+ + + + {this.renderFilters()} +
+ ) + } +} diff --git a/web/src/app/alerts/components/CheckedAlertsFormControl.js b/web/src/app/alerts/components/CheckedAlertsFormControl.js new file mode 100644 index 0000000000..9362d50203 --- /dev/null +++ b/web/src/app/alerts/components/CheckedAlertsFormControl.js @@ -0,0 +1,397 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import { setCheckedAlerts, setAlertsActionComplete } from '../../actions' +import { bindActionCreators } from 'redux' +import { connect } from 'react-redux' +import Checkbox from '@material-ui/core/Checkbox' +import Grid from '@material-ui/core/Grid' +import IconButton from '@material-ui/core/IconButton' +import Tooltip from '@material-ui/core/Tooltip' +import ArrowDropDown from '@material-ui/icons/ArrowDropDown' +import AcknowledgeIcon from '@material-ui/icons/Check' +import CloseIcon from '@material-ui/icons/Close' +import EscalateIcon from '@material-ui/icons/ArrowUpward' +import withStyles from '@material-ui/core/styles/withStyles' +import { styles as globalStyles } from '../../styles/materialStyles' +import Icon from '@material-ui/core/Icon' +import OtherActions from '../../util/OtherActions' +import classnames from 'classnames' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import UpdateAlertsSnackbar from './UpdateAlertsSnackbar' +import { graphql2Client } from '../../apollo' +import withWidth from '@material-ui/core/withWidth' +import { alertFilterSelector } from '../../selectors' + +const updateAlerts = gql` + mutation UpdateAlertsMutation($input: UpdateAlertsInput!) 
{ + updateAlerts(input: $input) { + alertID + id + } + } +` + +const escalateAlerts = gql` + mutation EscalateAlertsMutation($input: [Int!]) { + escalateAlerts(input: $input) { + alertID + id + } + } +` + +/* + * On sm-md breakpoints checkbox actions are sticky below toolbar + */ +const stickyBase = { + backgroundColor: 'lightgrey', // same color as background + boxShadow: '0px 0px 0px 3px rgba(211,211,211, 1)', // shadow to overlap list shadow + marginBottom: '0.75em', // push list down below box shadow + marginTop: -48, // height between checkbox and toolbar + paddingTop: '0.5em', // from sidebar.js wrapper padding + position: 'sticky', // stop moving while scrolling + zIndex: 1, // above alerts list +} + +const styles = theme => ({ + ...globalStyles(theme), + hover: { + '&:hover': { + cursor: 'pointer', + }, + }, + icon: { + alignItems: 'center', + display: 'flex', + }, + popper: { + opacity: 1, + }, + whitespace: { + width: 27, + }, + whitespaceXs: { + width: 19, + }, + hidden: { + visibility: 'hidden', + }, + stickySmall: { + ...stickyBase, + top: 56, // toolbar height on small devices + }, + stickyMedium: { + ...stickyBase, + top: 64, // toolbar height on medium devices + }, + stickyLarge: { + ...stickyBase, + marginTop: '-1em', + top: 64, + }, +}) + +const mapStateToProps = state => ({ + actionComplete: state.alerts.actionComplete, + checkedAlerts: state.alerts.checkedAlerts, + filter: alertFilterSelector(state), +}) + +const mapDispatchToProps = dispatch => + bindActionCreators( + { + setCheckedAlerts, + setAlertsActionComplete, + }, + dispatch, + ) + +@withStyles(styles) +@withWidth() +@connect( + mapStateToProps, + mapDispatchToProps, +) +export default class CheckedAlertsFormControl extends Component { + static propTypes = { + cardClassName: p.string, + data: p.shape({ + alert2: p.shape({ + items: p.array, + total_count: p.number, + }), + }), + } + + state = { + errorMessage: '', + updateMessage: '', + } + + visibleAlertIDs = () => { + if 
(!this.props.data.alerts2) return [] + return this.props.data.alerts2.items + .filter(a => a.status !== 'closed') + .map(a => a.number) + } + + checkedAlertIDs = () => { + const alerts = {} + this.visibleAlertIDs().forEach(id => { + alerts[id] = true + }) + + return this.props.checkedAlerts.filter(id => alerts[id]) + } + + areAllChecked = () => { + return this.visibleAlertIDs().length === this.checkedAlertIDs().length + } + + areNoneChecked = () => { + return this.checkedAlertIDs().length === 0 + } + + // handle resetting selected alerts when visiting another route + componentWillUnmount() { + this.props.setCheckedAlerts([]) + } + + setNone = () => { + this.props.setCheckedAlerts([]) + } + + setAll = () => { + this.props.setCheckedAlerts(this.visibleAlertIDs()) + } + + toggleCheckbox = () => { + if (this.areNoneChecked()) return this.setAll() + + return this.setNone() + } + + updateAlerts = (newStatus, mutation) => { + return mutation({ + variables: { + input: { + alertIDs: this.checkedAlertIDs(), + newStatus, + }, + }, + }) + } + + getSelectOptions = () => { + return [ + { + label: 'All', + onClick: this.setAll, + }, + { + label: 'None', + onClick: this.setNone, + }, + ] + } + + onUpdate = numUpdated => { + this.props.setAlertsActionComplete(true) + this.setState({ + updateMessage: `${numUpdated} of ${ + this.checkedAlertIDs().length + } alerts updated`, + }) + this.props.setCheckedAlerts([]) + this.props.refetch() + } + + getAckButton = () => { + return ( + { + this.props.setAlertsActionComplete(true) + this.setState({ errorMessage: err.message }) + }} + update={(cache, { data }) => this.onUpdate(data.updateAlerts.length)} + > + {mutation => ( + + this.updateAlerts('StatusAcknowledged', mutation)} + > + + + + )} + + ) + } + + getCloseButton = () => { + return ( + { + this.props.setAlertsActionComplete(true) + this.setState({ errorMessage: err.message }) + }} + update={(cache, { data }) => this.onUpdate(data.updateAlerts.length)} + > + {mutation => ( + + 
this.updateAlerts('StatusClosed', mutation)} + > + + + + )} + + ) + } + + getEscalateButton = () => { + return ( + { + this.props.setAlertsActionComplete(true) + this.setState({ errorMessage: err.message }) + }} + update={(cache, { data }) => this.onUpdate(data.escalateAlerts.length)} + > + {mutation => ( + + + mutation({ + variables: { + input: this.checkedAlertIDs(), + }, + }) + } + > + + + + )} + + ) + } + + renderActionButtons = () => { + const { checkedAlerts, classes, filter } = this.props + if (!checkedAlerts.length) return null + + let ack = null + let close = null + let escalate = null + if ( + filter === 'active' || + filter === 'unacknowledged' || + filter === 'all' + ) { + ack = this.getAckButton() + } + + if (filter !== 'closed') { + close = this.getCloseButton() + escalate = this.getEscalateButton() + } + + return ( + + {ack} + {close} + {escalate} + + ) + } + + render() { + const { actionComplete, classes, width } = this.props + const { errorMessage, updateMessage } = this.state + + // determine classname for container depending on current width breakpoint + let containerClass = null + switch (width) { + case 'xs': + containerClass = classnames(classes.stickySmall) + break + case 'sm': + containerClass = classnames(classes.stickyMedium) + break + default: + containerClass = classnames(classes.stickyLarge) + } + + return [ + this.props.setAlertsActionComplete(false)} + onExited={() => { + this.setState({ errorMessage: '', updateMessage: '' }) + }} + open={actionComplete} + updateMessage={updateMessage} + />, + + + + + + + + + + } + actions={this.getSelectOptions()} + placement='right' + /> + + {this.renderActionButtons()} + , + ] + } +} diff --git a/web/src/app/alerts/components/CreateAlertFab.js b/web/src/app/alerts/components/CreateAlertFab.js new file mode 100644 index 0000000000..84ff7fb09d --- /dev/null +++ b/web/src/app/alerts/components/CreateAlertFab.js @@ -0,0 +1,87 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 
'prop-types' +import Button from '@material-ui/core/Button' +import AddIcon from '@material-ui/icons/Add' +import classnames from 'classnames' + +import AlertForm from './AlertForm' +import withStyles from '@material-ui/core/styles/withStyles' + +const styles = theme => ({ + fab: { + position: 'fixed', + bottom: '1em', + right: '1em', + zIndex: 9001, + }, + transitionUp: { + transform: 'translate3d(0, -62px, 0)', + transition: theme.transitions.create('transform', { + duration: theme.transitions.duration.enteringScreen, + easing: theme.transitions.easing.easeOut, + }), + }, + warningTransitionUp: { + transform: 'translate3d(0, -7.75em, 0)', + transition: theme.transitions.create('transform', { + duration: theme.transitions.duration.enteringScreen, + easing: theme.transitions.easing.easeOut, + }), + }, + fabClose: { + transform: 'translate3d(0, 0, 0)', + transition: theme.transitions.create('transform', { + duration: theme.transitions.duration.leavingScreen, + easing: theme.transitions.easing.sharp, + }), + }, +}) + +@withStyles(styles) +export default class CreateAlertFab extends Component { + static propTypes = { + service: p.object, // used for alert form if on a service details page + transition: p.bool, // bool to transition fab up or down from snackbar notification + } + + state = { + showForm: false, + } + + handleShowForm = bool => { + this.setState({ + showForm: bool, + }) + } + + render() { + const { classes, showFavoritesWarning, transition } = this.props + + let fabOpen = classes.transitionUp + // use set padding for the larger verticle height on warning snackbar + if (showFavoritesWarning) { + fabOpen = classes.warningTransitionUp + } + + const transitionClass = transition ? 
fabOpen : classes.fabClose + + return [ + , + this.handleShowForm(false)} + service={this.props.service} + />, + ] + } +} diff --git a/web/src/app/alerts/components/UpdateAlertsSnackbar.js b/web/src/app/alerts/components/UpdateAlertsSnackbar.js new file mode 100644 index 0000000000..dcf56193a3 --- /dev/null +++ b/web/src/app/alerts/components/UpdateAlertsSnackbar.js @@ -0,0 +1,95 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import Snackbar from '@material-ui/core/Snackbar' +import SnackbarContent from '@material-ui/core/SnackbarContent' +import CheckCircleIcon from '@material-ui/icons/CheckCircle' +import CloseIcon from '@material-ui/icons/Close' +import ErrorIcon from '@material-ui/icons/Error' +import IconButton from '@material-ui/core/IconButton' +import withStyles from '@material-ui/core/styles/withStyles' + +const icon = { + fontSize: 20, +} + +const styles = theme => ({ + success: { + backgroundColor: 'green', + }, + error: { + backgroundColor: theme.palette.error.dark, + }, + closeIcon: { + ...icon, + }, + resultIcon: { + ...icon, + opacity: 0.9, + marginRight: theme.spacing.unit, + }, + message: { + display: 'flex', + alignItems: 'center', + }, +}) + +@withStyles(styles) +export default class UpdateAlertsSnackbar extends Component { + static propTypes = { + errorMessage: p.string, + open: p.bool.isRequired, + updateMessage: p.string, + } + + getMessage = () => { + const { classes, errorMessage, updateMessage } = this.props + + if (errorMessage) { + return ( + + + {errorMessage} + + ) + } else { + return ( + + + {updateMessage} + + ) + } + } + + render() { + const { classes, errorMessage, onClose, onExited, open } = this.props + + return ( + + + + , + ]} + /> + + ) + } +} diff --git a/web/src/app/alerts/pages/AlertDetailPage.js b/web/src/app/alerts/pages/AlertDetailPage.js new file mode 100644 index 0000000000..928e1d6d30 --- /dev/null +++ b/web/src/app/alerts/pages/AlertDetailPage.js @@ -0,0 +1,77 @@ +import 
React, { Component } from 'react' +import { GenericError, ObjectNotFound } from '../../error-pages' +import Spinner from '../../loading/components/Spinner' +import gql from 'graphql-tag' +import { Query } from 'react-apollo' +import AlertDetails from '../components/AlertDetails' +import { POLL_ERROR_INTERVAL, POLL_INTERVAL } from '../../util/poll_intervals' + +const query = gql` + query AlertDetailsPageQuery($id: Int!) { + alert(id: $id) { + number: _id + id + status: status_2 + escalation_level + description + details + summary + service_id + source + assignments { + id + name + } + service { + id + name + escalation_policy_id + } + logs_2 { + event + message + timestamp + } + escalation_policy_snapshot { + repeat + current_level + last_escalation + steps { + delay_minutes + users { + id + name + } + schedules { + id + name + } + } + } + } + } +` + +export default class AlertDetailPage extends Component { + render() { + return ( + + {({ loading, error, data, startPolling }) => { + if (loading) return + if (error) { + startPolling(POLL_ERROR_INTERVAL) + return + } + + if (!data.alert) return + startPolling(POLL_INTERVAL) + return + }} + + ) + } +} diff --git a/web/src/app/alerts/pages/AlertsIndexPage.js b/web/src/app/alerts/pages/AlertsIndexPage.js new file mode 100644 index 0000000000..f7942c6170 --- /dev/null +++ b/web/src/app/alerts/pages/AlertsIndexPage.js @@ -0,0 +1,20 @@ +import React, { Component } from 'react' +import AlertsList from '../components/AlertsList' +import PageActions from '../../util/PageActions' + +import AlertsListFilter from '../components/AlertsListFilter' +import Search from '../../util/Search' + +export default class AlertsIndexPage extends Component { + render() { + return ( + + + + + + + + ) + } +} diff --git a/web/src/app/alerts/queries/AlertsListQuery.js b/web/src/app/alerts/queries/AlertsListQuery.js new file mode 100644 index 0000000000..bfb5db2b94 --- /dev/null +++ b/web/src/app/alerts/queries/AlertsListQuery.js @@ -0,0 +1,61 @@ 
+import gql from 'graphql-tag' + +export const alertsQuery = gql` + query alerts( + $favorite_services_only: Boolean! + $service_id: String + $search: String + $sort_desc: Boolean! + $limit: Int! + $offset: Int! + $omit_active: Boolean! + $omit_closed: Boolean! + $omit_triggered: Boolean! + $sort_by: AlertSortBy + $favorites_first: Boolean! + $favorites_only: Boolean! + $services_limit: Int! + $services_search: String! + ) { + alerts2( + options: { + favorite_services_only: $favorite_services_only + service_id: $service_id + search: $search + sort_desc: $sort_desc + limit: $limit + offset: $offset + omit_active: $omit_active + omit_closed: $omit_closed + omit_triggered: $omit_triggered + sort_by: $sort_by + } + ) { + total_count + items { + number: _id + id + status: status_2 + created_at + summary + service { + id + name + } + } + } + + services2( + options: { + favorites_first: $favorites_first + favorites_only: $favorites_only + limit: $services_limit + search: $services_search + } + ) { + items { + id + } + } + } +` diff --git a/web/src/app/apollo.js b/web/src/app/apollo.js new file mode 100644 index 0000000000..5f3b59c395 --- /dev/null +++ b/web/src/app/apollo.js @@ -0,0 +1,135 @@ +import ApolloClient from 'apollo-client' +import { ApolloLink } from 'apollo-link' +import { createHttpLink } from 'apollo-link-http' +import { RetryLink } from 'apollo-link-retry' +import { InMemoryCache } from 'apollo-cache-inmemory' +import { camelCase } from 'lodash-es' +import { toIdValue } from 'apollo-utilities' +import { authLogout } from './actions' + +import reduxStore from './reduxStore' + +let pendingMutations = 0 +window.onbeforeunload = function(e) { + if (!pendingMutations) { + return + } + let dialogText = + 'Your changes have not finished saving. If you leave this page, they could be lost.' 
+ e.returnValue = dialogText + return dialogText +} + +const trackMutation = p => { + pendingMutations++ + p.then(() => pendingMutations--, () => pendingMutations--) +} +export function doFetch(body, url = '/v1/graphql') { + const f = fetch(url, { + credentials: 'same-origin', + method: 'POST', + headers: { + Accept: 'application/json', + 'Content-Type': 'application/json', + }, + body, + }) + + if (body.query && body.query.startsWith && body.query.startsWith('mutation')) + trackMutation(f) + + return f.then(res => { + if (res.ok) { + return res + } + + if (res.status === 401) { + reduxStore.dispatch(authLogout()) + } + + throw new Error('HTTP Response ' + res.status + ': ' + res.statusText) + }) +} + +const retryLink = new RetryLink({ + delay: { + initial: 500, + max: 3000, + jitter: true, + }, + attempts: { + max: 5, + retryIf: (error, _operation) => { + // Retry on any error except HTTP Response errors with the + // exception of 502-504 response codes (e.g. no retry on 401/auth etc..). + return ( + !!error && + (!/^HTTP Response \d+:/.test(error.message) || + /^HTTP Response 50[234]:/.test(error.message)) + ) + }, + }, +}) + +const defaultHttpLink = createHttpLink({ + uri: '/v1/graphql', + fetch: (url, opts) => { + return doFetch(opts.body) + }, +}) + +// compose links +const defaultLink = ApolloLink.from([ + retryLink, + defaultHttpLink, // terminating link must be last: apollographql.com/docs/link/overview.html#terminating +]) + +export const graphql1Client = new ApolloClient({ + link: defaultLink, + cache: new InMemoryCache(), +}) + +const graphql2HttpLink = createHttpLink({ + uri: '/api/graphql', + fetch: (url, opts) => { + return doFetch(opts.body, url) + }, +}) + +const graphql2Link = ApolloLink.from([retryLink, graphql2HttpLink]) + +const simpleCacheTypes = [ + 'Alert', + 'Rotation', + 'Schedule', + 'EscalationPolicy', + 'Service', + 'User', + 'SlackChannel', +] + +// tell Apollo to use cached data for `type(id: foo) {... 
}` queries +const queryCache = {} +let cache +simpleCacheTypes.forEach(name => { + queryCache[camelCase(name)] = (_, args) => + args && + toIdValue( + cache.config.dataIdFromObject({ + __typename: name, + id: args.id, + }), + ) +}) +cache = new InMemoryCache({ + cacheRedirects: { + Query: { + ...queryCache, + }, + }, +}) + +export const graphql2Client = new ApolloClient({ + link: graphql2Link, + cache, +}) diff --git a/web/src/app/config/index.js b/web/src/app/config/index.js new file mode 100644 index 0000000000..15d25de515 --- /dev/null +++ b/web/src/app/config/index.js @@ -0,0 +1,11 @@ +export const ITEMS_PER_PAGE = 15 +export default { + BASE_API_URL: process.env.GO_ALERT_BASE_API_URL || '/api', +} +export const POLL_INTERVAL = global.Cypress ? 1000 : 3500 +export const POLL_ERROR_INTERVAL = global.Cypress ? 1000 : 30000 + +export const DEFAULT_SPIN_DELAY_MS = 200 +export const DEFAULT_SPIN_WAIT_MS = 1500 + +export const DEBOUNCE_DELAY = global.Cypress ? 50 : 250 diff --git a/web/src/app/contact-methods/components/ContactMethodForm.js b/web/src/app/contact-methods/components/ContactMethodForm.js new file mode 100644 index 0000000000..b146b4c7d6 --- /dev/null +++ b/web/src/app/contact-methods/components/ContactMethodForm.js @@ -0,0 +1,409 @@ +import React, { Component } from 'react' +import p from 'prop-types' +import FormControl from '@material-ui/core/FormControl' +import FormHelperText from '@material-ui/core/FormHelperText' +import Grid from '@material-ui/core/Grid' +import InputLabel from '@material-ui/core/InputLabel' +import MenuItem from '@material-ui/core/MenuItem' +import Select from '@material-ui/core/Select' +import TextField from '@material-ui/core/TextField' +import VerificationForm from './VerificationForm' +import gql from 'graphql-tag' +import { withApollo } from 'react-apollo' +import ApolloFormDialog from '../../dialogs/components/ApolloFormDialog' +import { createNotificationRuleMutation } from 
'../../notification-rules/components/CreateNotificationRuleForm' + +const createContactMethodMutation = gql` + mutation CreateContactMethodMutation($input: CreateContactMethodInput) { + createContactMethod(input: $input) { + id + name + type + value + disabled + } + } +` + +const updateContactmethodMutation = gql` + mutation UpdateContactMethodMutation($input: UpdateContactMethodInput) { + updateContactMethod(input: $input) { + id + name + type + value + disabled + } + } +` + +const types = ['SMS', 'VOICE'] +const countryCodeOptions = [ + { + label: '+1 (United States of America)', + value: '+1', + length: 10, + }, + { + label: '+91 (India)', + value: '+91', + length: 10, + }, +] + +const fieldStyle = { + width: '100%', +} + +const getPhoneLen = code => + countryCodeOptions.find(o => o.value === code).length + +const splitNumber = phone => { + const cc = countryCodeOptions.find(o => phone.startsWith(o.value)) + if (!cc) { + throw new Error('invalid or unknown country code for number: ' + phone) + } + return { + cc: cc.value, + phone: phone.slice(cc.value.length), + } +} + +class ContactMethodForm extends Component { + static propTypes = { + id: p.string, + type: p.string, + value: p.string, + name: p.string, + open: p.bool, + userId: p.string, + existing: p.arrayOf( + p.shape({ + id: p.string.isRequired, + type: p.string.isRequired, + name: p.string.isRequired, + }), + ).isRequired, + handleRequestClose: p.func.isRequired, + cmDisabled: p.bool, + } + + constructor(props) { + super(props) + + const type = props.type || 'SMS' + const value = props.value || '' + + let phone = '' + let cc = '+1' + + if ((type === 'SMS' || type === 'VOICE') && value.length > 2) { + const n = splitNumber(value) + phone = n.phone + cc = n.cc + } + + this.state = { + name: props.name || '', + type: type, + countryCode: cc, + phone: phone, + submitted: false, + readOnly: false, + contactMethod: {}, + } + } + + shouldSubmit = () => { + this.setState({ submitted: true }) + + const 
shouldSubmit = !(this.getNameError(true) || this.getValueError()) + if (shouldSubmit) { + this.setState({ readOnly: true }) + return true + } + + return false + } + + getVariables = () => { + if (this.props.id) { + return { + input: { + id: this.props.id, + disabled: this.props.cmDisabled, + name: this.state.name, + type: this.state.type, + value: this.getValue(), + }, + } + } else { + return { + input: { + user_id: this.props.userId, + name: this.state.name, + type: this.state.type, + value: this.getValue(), + }, + } + } + } + + // update cache + createNotificationRule = cm => { + return this.props.client + .mutate({ + mutation: createNotificationRuleMutation, + variables: { + input: { + user_id: this.props.userId, + delay_minutes: 0, + contact_method_id: cm.id, + }, + }, + }) + .catch(err => console.error(err)) + } + + onCreateCMSuccess = (cache, data) => { + this.setState({ submitted: false }) + const cm = data.createContactMethod + if (!cm) return // don't need to update cache on an update vs. 
create + this.setState({ contactMethod: cm, showVerifyForm: cm.disabled }) + + // if contact method enabled and no notification rules, create notification rule + if ( + !cm.disabled && + (!this.props.notificationRules || + this.props.notificationRules.length === 0) + ) { + this.createNotificationRule(cm) + } + } + + onVerificationSuccess = () => { + // create notification that notifies immediately for new user's contact method + this.createNotificationRule(this.state.contactMethod) + } + + renderVerificationForm() { + if (!this.state.showVerifyForm) return null + const { contactMethod, showVerifyForm } = this.state + return ( + this.setState({ showVerifyForm: false })} + onSuccess={() => this.onVerificationSuccess()} + /> + ) + } + + getValue() { + switch (this.state.type) { + case 'SMS': + case 'VOICE': + return this.state.countryCode + this.state.phone + } + } + + getNameError(submitted = this.state.submitted) { + const name = this.state.name.trim() + if ( + submitted && + this.props.existing.some( + e => + e.type === this.state.type && + e.id !== this.props.id && + e.name === name, + ) + ) { + return 'Name must be unique for a given type.' + } + + if (submitted && !name) { + return 'A name is required.' + } + } + + getValueError() { + switch (this.state.type) { + case 'SMS': + case 'VOICE': + return this.getPhoneError(true) + } + } + + getPhoneError(submitted = this.state.submitted) { + // The only invalid case is too few digits, since we filter inputs. + const len = getPhoneLen(this.state.countryCode) + if (submitted && this.state.phone.length < len) { + return 'Enter a ' + len + ' digit number (including area code).' + } + } + + filterSetPhone(newVal) { + this.setState({ phone: newVal.replace(/[^0-9]/g, '').slice(0, 10) }) + } + + renderFields() { + const { name, type, countryCode } = this.state + + let selectField = ( + + ) + + return ( + + + + this.setState({ name: e.target.value })} + placeholder='Personal, Work, Home...' 
+ value={name} + /> + {this.getNameError()} + + + + + Type + {selectField} + + + + + Country Code + + + + + + this.filterSetPhone(e.target.value)} + type='tel' + value={this.state.phone} + /> + {this.getPhoneError()} + + + + ) + } + + resetForm = () => { + const type = this.props.type || 'SMS' + const value = this.props.value || '' + + let phone = '' + let cc = '+1' + + if ((type === 'SMS' || type === 'VOICE') && value.length > 2) { + const n = splitNumber(value) + phone = n.phone + cc = n.cc + } + + this.setState({ + name: this.props.name || '', + type: type, + countryCode: cc, + phone: phone, + submitted: false, + readOnly: false, + }) + } + + render() { + const { open, newUser, id } = this.props + + const newUserText = 'To get started, please enter a contact method.' + const newUserCaption = + 'By entering your contact information, you agree to receive auto-dialed ' + + 'and prerecorded alert calls or texts from Target or those acting on behalf of Target Corporation.' + + let title = 'Add New Contact Method' + if (newUser) { + title = 'Welcome to GoAlert!' + } else if (id) { + title = 'Edit Contact Method' + } + + return [ + this.setState({ readOnly: false })} + caption={newUser ? newUserCaption : null} + disableCancel={newUser} + fields={this.renderFields()} + getVariables={this.getVariables} + mutation={ + id ? updateContactmethodMutation : createContactMethodMutation + } + onRequestClose={this.props.handleRequestClose} + onSuccess={this.onCreateCMSuccess} + open={open} + resetForm={this.resetForm} + shouldSubmit={this.shouldSubmit} + subtitle={newUser ? 
newUserText : null} + title={title} + />, + this.renderVerificationForm(), + ] + } +} + +export default withApollo(ContactMethodForm) diff --git a/web/src/app/contact-methods/components/VerificationForm.js b/web/src/app/contact-methods/components/VerificationForm.js new file mode 100644 index 0000000000..f857882253 --- /dev/null +++ b/web/src/app/contact-methods/components/VerificationForm.js @@ -0,0 +1,200 @@ +import React, { Component } from 'react' +import p from 'prop-types' +import FormControl from '@material-ui/core/FormControl' +import FormHelperText from '@material-ui/core/FormHelperText' +import Grid from '@material-ui/core/Grid' +import TextField from '@material-ui/core/TextField' +import LoadingButton from '../../loading/components/LoadingButton' +import ApolloFormDialog from '../../dialogs/components/ApolloFormDialog' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' + +const verifyContactMethodMutation = gql` + mutation VerifyContactMethodMutation($input: VerifyContactMethodInput) { + verifyContactMethod(input: $input) { + contact_method_ids + } + } +` + +const sendContactMethodVerificationMutation = gql` + mutation SendContactMethodVerificationMutation( + $input: SendContactMethodVerificationInput + ) { + sendContactMethodVerification(input: $input) { + id + } + } +` + +const fieldStyle = { + width: '100%', +} + +function formatNumber(n) { + if (n.startsWith('+1')) { + return `+1 (${n.slice(2, 5)}) ${n.slice(5, 8)}-${n.slice(8)}` + } + if (n.startsWith('+91')) { + return `+91-${n.slice(3, 5)}-${n.slice(5, 8)}-${n.slice(8)}` + } else { + return {n} + } +} + +export default class VerificationForm extends Component { + static propTypes = { + id: p.string, + open: p.bool, + userId: p.string, + handleRequestClose: p.func.isRequired, + } + + constructor(props) { + super(props) + + this.state = { + code: '', + submitted: false, + readOnly: false, + resend: false, + sendError: '', + loading: false, + } + } + + shouldSubmit = () => { + 
this.setState({ submitted: true }) + + const shouldSubmit = !this.getCodeError(true) + if (shouldSubmit) { + this.setState({ readOnly: true }) + return true + } + + return false + } + + sendCode = mutation => { + this.setState({ loading: true }) + mutation({ + variables: { + input: { + contact_method_id: this.props.id, + }, + }, + }) + } + + getCodeError(submitted = this.state.submitted) { + const code = this.state.code.trim() + if (submitted && !code) { + return 'Code is required' + } + if ((submitted && code.length !== 6) || (submitted && code.match(/\D/))) { + return 'Enter the 6-digit numeric code' + } + } + + getTitle() { + if (this.state.resend) { + return 'Resend Code' + } else { + return 'Send Code' + } + } + + renderFields() { + const { code, loading, readOnly } = this.state + + return ( + + + + this.setState({ resend: true, sendError: '', loading: false }) + } + onError={() => + this.setState({ + loading: false, + sendError: 'Too many messages! Try again after some time.', + }) + } + > + {mutation => ( + this.sendCode(mutation)} + /> + )} + + + + + + this.setState({ + code: e.target.value.replace(/\D/, '').slice(0, 6), + }) + } + placeholder='Enter the verification code received' + value={code} + /> + {this.getCodeError()} + + + + ) + } + + resetForm = () => { + this.setState({ + sendError: '', + code: '', + submitted: false, + readOnly: false, + loading: false, + }) + } + + render() { + const { open } = this.props + const title = 'Verify Contact Method by ' + this.props.type + const subtitle = `Verifying "${this.props.name}" at ${formatNumber( + this.props.value, + )}` + return ( + this.setState({ readOnly: false })} + errorMessage={this.state.sendError} + fields={this.renderFields()} + getVariables={() => ({ + input: { + contact_method_id: this.props.id, + verification_code: parseInt(this.state.code), + }, + })} + mutation={verifyContactMethodMutation} + onRequestClose={this.props.handleRequestClose} + open={open} + resetForm={this.resetForm} + 
shouldSubmit={this.shouldSubmit} + subtitle={subtitle} + title={title} + /> + ) + } +} diff --git a/web/src/app/details/DetailsPage.js b/web/src/app/details/DetailsPage.js new file mode 100644 index 0000000000..f361df66ba --- /dev/null +++ b/web/src/app/details/DetailsPage.js @@ -0,0 +1,149 @@ +import React from 'react' +import p from 'prop-types' +import { connect } from 'react-redux' +import { absURLSelector } from '../selectors/url' +import statusStyles from '../util/statusStyles' +import withStyles from '@material-ui/core/styles/withStyles' +import Card from '@material-ui/core/Card' +import CardContent from '@material-ui/core/CardContent' +import Grid from '@material-ui/core/Grid' +import Typography from '@material-ui/core/Typography' +import { Link } from 'react-router-dom' +import { ChevronRight } from '@material-ui/icons' +import List from '@material-ui/core/List' +import ListItem from '@material-ui/core/ListItem' +import ListItemText from '@material-ui/core/ListItemText' +import ListItemSecondaryAction from '@material-ui/core/ListItemSecondaryAction' +import IconButton from '@material-ui/core/IconButton' +import Markdown from '../util/Markdown' + +const styles = theme => ({ + ...statusStyles, + spacing: { + '&:not(:first-child)': { + marginTop: 8, + }, + marginBottom: 8, + }, + iconContainer: { + [theme.breakpoints.down('sm')]: { float: 'top' }, + [theme.breakpoints.up('md')]: { float: 'left' }, + margin: 20, + }, +}) + +const mapStateToProps = state => { + return { + absURL: absURLSelector(state), + } +} + +@withStyles(styles) +@connect(mapStateToProps) +export default class DetailsPage extends React.PureComponent { + static propTypes = { + title: p.string.isRequired, + details: p.string.isRequired, + + icon: p.node, + links: p.arrayOf( + p.shape({ + label: p.string.isRequired, + url: p.string.isRequired, + status: p.oneOf(['ok', 'warn', 'err']), + subText: p.node, + }), + ), + + titleFooter: p.any, + pageFooter: p.any, + } + + renderLink = ({ url, label, 
status, subText }, idx) => { + const { classes, absURL } = this.props + let itemClass = classes.noStatus + switch (status) { + case 'ok': + itemClass = classes.statusOK + break + case 'warn': + itemClass = classes.statusWarning + break + case 'err': + itemClass = classes.statusError + break + } + + return ( + + + + + + + + + ) + } + + renderLinks() { + const { links } = this.props + + if (!links || !links.length) return null + + return ( + + + {links.map(this.renderLink)} + + + ) + } + + render() { + const { + title, + details, + icon, + titleFooter, + pageFooter, + classes, + } = this.props + return ( + + + + + {icon &&
{icon}
} + {title} + + + + {titleFooter && ( + {titleFooter} + )} +
+
+
+ + {this.renderLinks()} + + {pageFooter && ( + + {pageFooter} + + )} +
+ ) + } +} diff --git a/web/src/app/dialogs/FormDialog.js b/web/src/app/dialogs/FormDialog.js new file mode 100644 index 0000000000..c83e231ce6 --- /dev/null +++ b/web/src/app/dialogs/FormDialog.js @@ -0,0 +1,218 @@ +import React from 'react' +import p from 'prop-types' +import withStyles from '@material-ui/core/styles/withStyles' +import Button from '@material-ui/core/Button' +import Dialog from '@material-ui/core/Dialog' +import DialogActions from '@material-ui/core/DialogActions' +import DialogContent from '@material-ui/core/DialogContent' +import Typography from '@material-ui/core/Typography' +import withMobileDialog from '@material-ui/core/withMobileDialog' +import { DefaultTransition, FullscreenTransition } from '../util/Transitions' +import withWidth, { isWidthUp } from '@material-ui/core/withWidth/index' +import LoadingButton from '../loading/components/LoadingButton' +import DialogTitleWrapper from './components/DialogTitleWrapper' +import DialogContentError from './components/DialogContentError' +import { styles as globalStyles } from '../styles/materialStyles' +import gracefulUnmount from '../util/gracefulUnmount' +import { Form } from '../forms' + +const styles = theme => { + const { cancelButton, dialogWidth } = globalStyles(theme) + return { + cancelButton, + dialogWidth, + form: { + height: '100%', // pushes caption to bottom if room is available + }, + formContainer: { + width: '100%', + height: '100%', + display: 'flex', + flexDirection: 'column', + }, + noGrow: { + flexGrow: 0, + }, + } +} + +@withStyles(styles) +@withMobileDialog() +@withWidth() +@gracefulUnmount() +export default class FormDialog extends React.PureComponent { + static propTypes = { + title: p.string.isRequired, + subTitle: p.string, + caption: p.string, + + errors: p.arrayOf( + p.shape({ + message: p.string.isRequired, + }), + ), + + form: p.element, + loading: p.bool, + alert: p.bool, + confirm: p.bool, + maxWidth: p.string, + + // disables form content padding + 
disableGutters: p.bool, + + onClose: p.func, + onSubmit: p.func, + + // provided by gracefulUnmount() + isUnmounting: p.bool, + onExited: p.func, + + // allow the dialog to grow beyond the normal max-width. + grow: p.bool, + } + + static defaultProps = { + errors: [], + onClose: () => {}, + onSubmit: () => {}, + loading: false, + confirm: false, + caption: '', + maxWidth: 'sm', + } + + render() { + const { + alert, + classes, + confirm, + disableGutters, + errors, + isUnmounting, + loading, + maxWidth, + onClose, + onSubmit, + subTitle, // can't be used in dialogProps spread + title, + width, + ...dialogProps + } = this.props + const isWideScreen = isWidthUp('md', width) + + return ( + + +
{ + e.preventDefault() + if (valid) onSubmit() + }} + > + {this.renderForm()} + {this.renderCaption()} + {this.renderErrors()} + {this.renderActions()} +
+
+ ) + } + + renderForm = () => { + const { classes, disableGutters, form, subTitle } = this.props + + // don't render empty space + if (!form && !subTitle) { + return null + } + + let Component = DialogContent + if (disableGutters) Component = 'div' + + return ( + + {this.renderSubtitle()} + {form} + + ) + } + + renderSubtitle = () => { + if (!this.props.subTitle) return null + + return {this.props.subTitle} + } + + renderCaption = () => { + if (!this.props.caption) return null + + return ( + + {this.props.caption} + + ) + } + + renderErrors = () => { + return this.props.errors.map((err, idx) => ( + + )) + } + + renderActions = () => { + const { alert, confirm, classes, errors, loading, onClose } = this.props + + if (alert) { + return ( + + + + ) + } + + return ( + + + + + ) + } +} diff --git a/web/src/app/dialogs/components/ApolloFormDialog.js b/web/src/app/dialogs/components/ApolloFormDialog.js new file mode 100644 index 0000000000..5202017c8c --- /dev/null +++ b/web/src/app/dialogs/components/ApolloFormDialog.js @@ -0,0 +1,188 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import Button from '@material-ui/core/Button' +import Dialog from '@material-ui/core/Dialog' +import DialogActions from '@material-ui/core/DialogActions' +import DialogContent from '@material-ui/core/DialogContent' +import Typography from '@material-ui/core/Typography' +import withStyles from '@material-ui/core/styles/withStyles' +import withMobileDialog from '@material-ui/core/withMobileDialog' +import LoadingButton from '../../loading/components/LoadingButton' +import { styles } from '../../styles/materialStyles' +import { DefaultTransition, FullscreenTransition } from '../../util/Transitions' +import DialogContentError from './DialogContentError' +import DialogTitleWrapper from './DialogTitleWrapper' +import { Mutation } from 'react-apollo' +import classnames from 'classnames' + +/** + * Consumes an apollo mutation (with an updater function, if 
applicable) + * and renders a dialog with a form, as specified through props. + * + * The form submits to the mutation provided. + */ +@withStyles(styles) +@withMobileDialog() +export default class ApolloFormDialog extends Component { + static propTypes = { + quickAction: p.shape({ + allowEdits: p.func, + fields: p.object.isRequired, + getVariables: p.func, + onRequestClose: p.func.isRequired, + title: p.string.isRequired, + transition: p.func, + subtitle: p.string, + caption: p.string, + contentOnly: p.bool, + disableCancel: p.bool, + mutation: p.object.isRequired, + shouldSubmit: p.func.isRequired, + updater: p.func, + }), + } + + state = { + error: '', + attemptCount: 0, + loading: false, + } + + onError = error => { + this.setState({ + attemptCount: this.state.attemptCount + 1, + error, + loading: false, + }) + + if (typeof this.props.allowEdits === 'function') this.props.allowEdits() + } + + onSubmit = (e, mutation) => { + e.preventDefault() + if (this.state.loading) return // dont allow multiple submissions while loading + const shouldSubmit = this.props.shouldSubmit() // validate fields, set to readOnly while committing, etc + if (shouldSubmit) { + this.setState({ loading: true }) + return mutation({ variables: this.props.getVariables() }).catch(error => + this.onError(error.message), + ) + } + } + + render() { + const loading = this.state.loading + const { + caption, + classes, + contentOnly, + fields, + fullScreen, + mutation, + onRequestClose, + onSuccess, + open, + subtitle, + title, + } = this.props + + let titleJSX + if (title) { + titleJSX = ( + + ) + } + + let subtitleJSX + if (subtitle) { + subtitleJSX = ( + + {subtitle} + + ) + } + + let captionJSX + if (caption) { + captionJSX = ( + + {caption} + + ) + } + + const content = [ + titleJSX, + subtitleJSX, + { + this.setState({ loading: false }) + onRequestClose() + if (typeof onSuccess === 'function') { + onSuccess(cache, data) + } + }} + > + {mutation => ( +
this.onSubmit(e, mutation)} + style={{ width: '100%' }} + > + + {fields} + + {captionJSX} + + + + + + + )} +
, + ] + + if (contentOnly && open) { + return content + } + + return ( + { + if (this.props.resetForm) this.props.resetForm() + this.setState({ error: '', attemptCount: 0, loading: false }) + }} + > + {content} + + ) + } +} diff --git a/web/src/app/dialogs/components/ConfirmationDialog.js b/web/src/app/dialogs/components/ConfirmationDialog.js new file mode 100644 index 0000000000..adbb0ae8eb --- /dev/null +++ b/web/src/app/dialogs/components/ConfirmationDialog.js @@ -0,0 +1,180 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import Button from '@material-ui/core/Button' +import Dialog from '@material-ui/core/Dialog' +import DialogActions from '@material-ui/core/DialogActions' +import DialogContent from '@material-ui/core/DialogContent' +import DialogTitle from '@material-ui/core/DialogTitle' +import Typography from '@material-ui/core/Typography' +import withStyles from '@material-ui/core/styles/withStyles' +import LoadingButton from '../../loading/components/LoadingButton' +import { styles } from '../../styles/materialStyles' +import DialogContentError from './DialogContentError' +import { Mutation } from 'react-apollo' + +@withStyles(styles) +export default class ConfirmationDialog extends Component { + static propTypes = { + mutation: p.object, + mutationVariables: p.object, + onMutationSuccess: p.func, + open: p.bool.isRequired, + onRequestClose: p.func.isRequired, + message: p.string, + warning: p.string, + } + + constructor(props) { + super(props) + this.state = { + error: '', + attemptCount: 0, + loading: false, + } + } + + componentWillReceiveProps(nextProps) { + if (!this.props.open && nextProps.open) { + this.setState({ error: '', attemptCount: 0, loading: false }) + } + } + + /* + * Generic submit to handle actions with onClick functions. 
+ * Generally used for relay mutations + */ + confirmAction() { + if (typeof this.props.completeAction !== 'function') return + + this.props + .completeAction() + .then(() => { + if (this._mnt) { + // only set state if still mounted + this.setState({ + error: '', + attemptCount: 0, + loading: false, + }) + } + this.props.onRequestClose(true) // successful action + }) + .catch(err => this.handleError(err)) + } + + handleError = err => { + this.setState({ + error: err.message || err, + attemptCount: this.state.attemptCount + 1, + loading: false, + }) + } + + componentDidMount() { + this._mnt = true + } + + componentWillUnmount() { + this._mnt = false + } + + onMutationSubmit = (e, mutation) => { + e.preventDefault() + if (this.state.loading) return // dont allow multiple submissions while loading + this.setState({ loading: true, error: null }) + return mutation({ variables: this.props.mutationVariables }).catch(error => + this.handleError(error.message), + ) + } + + /* + * Used for Apollo mutations as they are wrapped with a higher order component + * to submit a mutation + */ + renderAsMutation = () => { + const { mutation, onMutationSuccess, onRequestClose } = this.props + + return ( + { + this.setState({ loading: false }) + onRequestClose(true) // success = true prevents no-op set state in some funcs + if (typeof onMutationSuccess === 'function') { + onMutationSuccess(cache, data) + } + }} + > + {mutation => ( + this.onMutationSubmit(e, mutation)} + /> + )} + + ) + } + + renderSubmit = () => { + return ( + { + this.setState({ loading: true, error: null }) + this.confirmAction() + }} + /> + ) + } + + render() { + const { + open, + onRequestClose, + classes, + message, + mutation, + warning, + } = this.props + const { loading } = this.state + + return ( + onRequestClose()} + classes={{ + paper: classes.dialogWidth, + }} + > + Are you sure? + + + {message} + + + {warning} + + + + + + {mutation ? 
this.renderAsMutation() : this.renderSubmit()} + + + ) + } +} diff --git a/web/src/app/dialogs/components/DialogContentError.js b/web/src/app/dialogs/components/DialogContentError.js new file mode 100644 index 0000000000..c83fd5f52a --- /dev/null +++ b/web/src/app/dialogs/components/DialogContentError.js @@ -0,0 +1,44 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import DialogContent from '@material-ui/core/DialogContent' +import Typography from '@material-ui/core/Typography' +import withStyles from '@material-ui/core/styles/withStyles' +import Error from '@material-ui/icons/Error' +import { styles } from '../../styles/materialStyles' +import { Zoom } from '@material-ui/core' + +@withStyles(styles) +export default class DialogContentError extends Component { + static propTypes = { + error: p.string, + noPadding: p.bool, + } + + render() { + const { classes, error, noPadding, ...other } = this.props + const style = noPadding ? { paddingBottom: 0 } : null + + // maintains screen space if no error + if (!error) { + return ( + + +   + + + ) + } + + return ( + + + + +   + {error} + + + + ) + } +} diff --git a/web/src/app/dialogs/components/DialogTitleWrapper.js b/web/src/app/dialogs/components/DialogTitleWrapper.js new file mode 100644 index 0000000000..a302891427 --- /dev/null +++ b/web/src/app/dialogs/components/DialogTitleWrapper.js @@ -0,0 +1,81 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import AppBar from '@material-ui/core/AppBar' +import DialogTitle from '@material-ui/core/DialogTitle' +import IconButton from '@material-ui/core/IconButton' +import Toolbar from '@material-ui/core/Toolbar' +import Typography from '@material-ui/core/Typography' +import withStyles from '@material-ui/core/styles/withStyles' +import CloseIcon from '@material-ui/icons/Close' +import DropDownMenu from '../../dialogs/components/DropDownMenu' +import { styles } from '../../styles/materialStyles' + 
+/** + * Renders a fullscreen dialog with an app bar if on a small + * or mobile screen, and a standard dialog title otherwise. + */ +@withStyles(styles) +export default class DialogTitleWrapper extends Component { + static propTypes = { + fullScreen: p.bool.isRequired, + closeIcon: p.object, + toolbarItems: p.array, // list of JSX items to display on the toolbar + title: p.string.isRequired, + onClose: p.func, + options: p.array, // list of options to display as list items from option icon + } + + render() { + const { + classes, + closeIcon, + fullScreen, + toolbarItems, + onClose, + options, + title, + } = this.props + + let menu + if (options && options.length > 0 && fullScreen) { + menu = + } else if (options && options.length > 0) { + menu = ( +
+ +
+ ) + } + + let closeButton + if (onClose) { + closeButton = ( + + {closeIcon || } + + ) + } + + if (fullScreen) { + return ( + + + {closeButton} + + {title} + + {toolbarItems} + {menu} + + + ) + } else { + return ( + + {title} + {menu} + + ) + } + } +} diff --git a/web/src/app/dialogs/components/DropDownMenu.js b/web/src/app/dialogs/components/DropDownMenu.js new file mode 100644 index 0000000000..62cf4328be --- /dev/null +++ b/web/src/app/dialogs/components/DropDownMenu.js @@ -0,0 +1,64 @@ +import React, { Component } from 'react' +import IconButton from '@material-ui/core/IconButton' +import Menu from '@material-ui/core/Menu' +import MenuItem from '@material-ui/core/MenuItem' +import withStyles from '@material-ui/core/styles/withStyles' +import { MoreVert as MoreVertIcon } from '@material-ui/icons' +import { styles } from '../../styles/materialStyles' + +/* + Takes a list of options each with a label and an onClick function +*/ + +@withStyles(styles) +export default class DropdownMenu extends Component { + constructor(props) { + super(props) + + this.state = { + anchorEl: null, + } + } + render() { + const options = this.props.options + + return ( +
+ this.setState({ anchorEl: event.currentTarget })} + aria-haspopup='true' + style={{ color: this.props.color || 'inherit' }} + > + + + { + if (this._fn) { + this._fn() + this._fn = null + } + }} + onClose={() => { + this.setState({ anchorEl: null }) + }} + > + {options.map(option => ( + { + this.setState({ anchorEl: null }) + this._fn = option.onClick + }} + > + {option.label} + + ))} + +
+ ) + } +} diff --git a/web/src/app/dialogs/components/FormDialog.js b/web/src/app/dialogs/components/FormDialog.js new file mode 100644 index 0000000000..799323cf8e --- /dev/null +++ b/web/src/app/dialogs/components/FormDialog.js @@ -0,0 +1,197 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import classnames from 'classnames' +import Button from '@material-ui/core/Button' +import Dialog from '@material-ui/core/Dialog' +import DialogActions from '@material-ui/core/DialogActions' +import DialogContent from '@material-ui/core/DialogContent' +import DialogContentText from '@material-ui/core/DialogContentText' +import Typography from '@material-ui/core/Typography' +import withStyles from '@material-ui/core/styles/withStyles' +import withMobileDialog from '@material-ui/core/withMobileDialog' +import LoadingButton from '../../loading/components/LoadingButton' +import { styles } from '../../styles/materialStyles' +import { DefaultTransition, FullscreenTransition } from '../../util/Transitions' +import DialogContentError from './DialogContentError' +import DialogTitleWrapper from './DialogTitleWrapper' + +@withStyles(styles) +@withMobileDialog() +export default class FormDialog extends Component { + static propTypes = { + quickAction: p.shape({ + fields: p.object.isRequired, + onRequestClose: p.func.isRequired, + onSubmit: p.func.isRequired, + onSuccess: p.func, + title: p.string.isRequired, + transition: p.func, + subtitle: p.string, + caption: p.string, + contentOnly: p.bool, + disableCancel: p.bool, + errorMessage: p.string, + readOnly: p.bool, + }), + } + + constructor(props) { + super(props) + this.state = { + error: '', + attemptCount: 0, + loading: false, + } + } + + submitForm(e) { + e.preventDefault() + if (this.state.loading) return // dont allow multiple submissions while loading + this.setState({ loading: true, error: '' }) + + const result = this.props.onSubmit(e) + + if (!result || typeof result === 'string') { + // 
if not a promise reject + this.setState({ + error: result, + attemptCount: this.state.attemptCount + 1, + loading: false, + }) + + if (this.props.allowEdits) this.props.allowEdits() + + return + } + + return result + .then(args => { + this.onClose(true) // successful action + if (this.props.onSuccess) this.props.onSuccess(args) // If the function exists run it + }) + .catch(err => { + this.setState({ + error: err.message || err, + attemptCount: this.state.attemptCount + 1, + loading: false, + }) + + if (this.props.allowEdits) this.props.allowEdits() + }) + } + + onClose = (successful = false, clickaway) => { + if (this.state.loading && !successful) return + this.props.onRequestClose(successful, clickaway) + } + + render() { + const loading = this.state.loading + const { + classes, + title, + fields, + fullScreen, + open, + readOnly, + subtitle, + caption, + contentOnly, + } = this.props + + let titleJSX + if (title) { + titleJSX = ( + + ) + } + + let subtitleJSX + if (subtitle) { + subtitleJSX = ( + + {subtitle} + + ) + } + + let captionJSX + if (caption) { + captionJSX = ( + + {caption} + + ) + } + + const content = [ + titleJSX, + subtitleJSX, +
this.submitForm(e)} + style={{ width: '100%' }} + > + + {fields} + + {captionJSX} + + + + { + this.submitForm(e) + }} + /> + + , + ] + + if (contentOnly && open) { + return content + } + + return ( + this.onClose(false, true)} + classes={{ + paper: classnames(classes.dialogWidth, classes.overflowVisible), + }} + fullScreen={fullScreen} + TransitionComponent={ + fullScreen ? FullscreenTransition : DefaultTransition + } + onExited={() => { + if (this.props.resetForm) this.props.resetForm() + this.setState({ error: '', attemptCount: 0, loading: false }) + }} + > + {content} + + ) + } +} diff --git a/web/src/app/documentation/IntegrationKeys.md b/web/src/app/documentation/IntegrationKeys.md new file mode 100644 index 0000000000..4da434824e --- /dev/null +++ b/web/src/app/documentation/IntegrationKeys.md @@ -0,0 +1,72 @@ +# API Reference + +- [Email](#Email) +- [Generic API](#Generic_API) +- [Grafana](#Grafana) + +--- + +## Email + +It is now possible to create an Email integration key from the Service Details page. This will generate a unique email address that can be used for creating alerts. + +De-duplication happens by matching subject and body contents automatically. The email subject line will become the alert summary. + +You can override de-duplication if needed and use a custom key by adding +`+some_value here` +before the "@" symbol. De-duplication behaves similarly to the Grafana and generic API integration keys: if there is an open alert, "duplicate suppressed" is logged, otherwise a new alert is created. + +### Custom Deduplication example + +`b3b16257-75e0-4b9f-9436-db950ec0436c@target.goalert.me` +would become +`b3b16257-75e0-4b9f-9436-db950e 0436c+some_value_here@target.goalert.me` +which would match alerts created for the same service, to the same +`some_value_here` +key, regardless of the subject or body. 
+On the Service page, Add an Integration Key, select Email and SAVE Copy the Email address and use this with the email-based service that you want to alert on. + +--- + +## Generic API + +### Params can be in query params or body (body takes precedence): + +`summary` -> required, sent as the sms and voice messages +`details` -> optional, additional information about the alert (e.g. links and whatnot) +`action` -> optional, if set to `close`, it will close any matching alerts +`dedup` -> optional, all calls for the same service with the same "dedup" string will update the same alert (if open) or create a new one. Defaults to using summary & details together. +`token` -> the integration key to use + +### Examples: + +```bash +curl -XPOST https:///api/v2/generic/incoming?token=key-here&summary=test +curl -XPOST https:///api/v2/generic/incoming?token=key-here&summary=test&dedup=disk-check +curl -XPOST https:///api/v2/generic/incoming?token=key-here&summary=test&action=close +``` + +--- + +## Grafana + +Grafana provides basic alerting functionality for metrics. + +To trigger an alert using Grafana, follow these steps: + +1. Within GoAlert, on the Services page, select the service you want to process the alert. Under Integration Keys: + + - Key Name: Enter a name for the key. + - Key Type: Grafana + - Click Add Key. Copy the generated URL and keep it handy, as you'll need it in a future step. + +2. In Grafana, click the Grafana icon in the top left and select Alerting > Notification Channels, then click New Channel: + + - Name: Choose a name that makes sense to people outside of your team. + - Type: webhook + - Send on all alerts: Do not select this checkbox unless you want to get paged for every single alert on Visualize. + - Url: Paste in the Grafana webhook URL you generated in step 1. + - Http Method: POST + - Click Send Test to verify that your configuration is correct, then click Save. + +3. 
Navigate to any of your graph panels on a dashboard, edit the panel, and click the Alert tab. Configure your alerts (if you haven't already), then in the Notifications section of the Alert tab, find the notification channel you just created in the Send to field. Click Save. diff --git a/web/src/app/documentation/components/IntegrationKeyAPI.js b/web/src/app/documentation/components/IntegrationKeyAPI.js new file mode 100644 index 0000000000..f2f66cc6fe --- /dev/null +++ b/web/src/app/documentation/components/IntegrationKeyAPI.js @@ -0,0 +1,45 @@ +import React, { Component } from 'react' +import Card from '@material-ui/core/Card' +import CardContent from '@material-ui/core/CardContent' +import MDReactComponent from 'markdown-react-js' +import markdownText from '../IntegrationKeys.md' + +const handleIterate = (Tag, props, children, level) => { + if (Tag === 'h2') { + props = { + ...props, + id: + typeof children[0] === 'string' + ? children[0].replace(' ', '_') + : children, + } + } + + return {children} +} + +function replaceAll(target, search, replacement) { + return target.split(search).join(replacement) +} + +const replaceString = 'https://' + +export default class IntegrationKeyAPI extends Component { + render() { + const protocol = window.location.protocol || 'https:' + const host = window.location.host + + let finalText = markdownText + if (host) { + finalText = replaceAll(finalText, replaceString, protocol + '//' + host) + } + + return ( + + + + + + ) + } +} diff --git a/web/src/app/error-pages/Errors.js b/web/src/app/error-pages/Errors.js new file mode 100644 index 0000000000..1de626d215 --- /dev/null +++ b/web/src/app/error-pages/Errors.js @@ -0,0 +1,52 @@ +import React, { Component } from 'react' +import Typography from '@material-ui/core/Typography' +import { + SentimentDissatisfied, + SentimentVeryDissatisfied, +} from '@material-ui/icons' + +export class PageNotFound extends Component { + render() { + return ( +
+ + + Sorry, the page you were trying to reach could not be found. + +
+ ) + } +} + +export class ObjectNotFound extends Component { + render() { + return ( +
+ + + Sorry, the {this.props.type || 'thing'} you were looking for could not + be found. + + + Someone may have deleted it, or it never existed. + +
+ ) + } +} + +export class GenericError extends Component { + render() { + let errorText + if (this.props.error) { + errorText = {this.props.error} + } + return ( +
+ + Sorry, an error occurred. + {errorText} +
+ ) + } +} diff --git a/web/src/app/error-pages/index.js b/web/src/app/error-pages/index.js new file mode 100644 index 0000000000..aef505f1ee --- /dev/null +++ b/web/src/app/error-pages/index.js @@ -0,0 +1 @@ +export * from './Errors' diff --git a/web/src/app/escalation-policies/PolicyCreateDialog.js b/web/src/app/escalation-policies/PolicyCreateDialog.js new file mode 100644 index 0000000000..6e740cf45c --- /dev/null +++ b/web/src/app/escalation-policies/PolicyCreateDialog.js @@ -0,0 +1,91 @@ +import React, { PureComponent } from 'react' +import p from 'prop-types' +import { graphql2Client } from '../apollo' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { fieldErrors, nonFieldErrors } from '../util/errutil' +import { Redirect } from 'react-router-dom' +import FormDialog from '../dialogs/FormDialog' +import PolicyForm from './PolicyForm' + +const mutation = gql` + mutation($input: CreateEscalationPolicyInput!) { + createEscalationPolicy(input: $input) { + id + } + } +` + +export default class PolicyCreateDialog extends PureComponent { + static propTypes = { + onClose: p.func, + } + + state = { + value: null, + errors: [], + } + + renderDialog(commit, status) { + const { loading, data, error } = status + const { value } = this.state + + if (data && data.createEscalationPolicy) { + return ( + + ) + } + + const fieldErrs = fieldErrors(error) + const defaultValue = { + name: '', + description: '', + repeat: { label: '3', value: '3' }, + } + + return ( + { + return commit({ + variables: { + input: { + name: (value && value.name) || defaultValue.name, + description: + (value && value.description) || defaultValue.description, + repeat: + (value && value.repeat.value) || defaultValue.repeat.value, + }, + }, + }) + }} + form={ + this.setState({ value })} + /> + } + /> + ) + } + + render() { + return ( + + {(commit, status) => this.renderDialog(commit, status)} + + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyDeleteDialog.js 
b/web/src/app/escalation-policies/PolicyDeleteDialog.js new file mode 100644 index 0000000000..8871d34a3d --- /dev/null +++ b/web/src/app/escalation-policies/PolicyDeleteDialog.js @@ -0,0 +1,63 @@ +import React from 'react' +import p from 'prop-types' +import { graphql2Client } from '../apollo' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { Redirect } from 'react-router-dom' +import FormDialog from '../dialogs/FormDialog' + +const mutation = gql` + mutation($input: [TargetInput!]!) { + deleteAll(input: $input) + } +` + +export default class PolicyDeleteDialog extends React.PureComponent { + static propTypes = { + escalationPolicyID: p.string.isRequired, + onClose: p.func, + } + + renderDialog = (commit, mutStatus) => { + const { loading, error, data } = mutStatus + if (data && data.deleteAll) { + return + } + + return ( + { + return commit({ + variables: { + input: [ + { + type: 'escalationPolicy', + id: this.props.escalationPolicyID, + }, + ], + }, + }) + }} + /> + ) + } + + render() { + return ( + ['epsQuery']} + > + {(commit, status) => this.renderDialog(commit, status)} + + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyDetails.js b/web/src/app/escalation-policies/PolicyDetails.js new file mode 100644 index 0000000000..fc935238ed --- /dev/null +++ b/web/src/app/escalation-policies/PolicyDetails.js @@ -0,0 +1,109 @@ +import React, { PureComponent } from 'react' +import p from 'prop-types' +import gql from 'graphql-tag' +import PageActions from '../util/PageActions' +import Query from '../util/Query' +import PolicyStepsQuery from './PolicyStepsQuery' +import OtherActions from '../util/OtherActions' +import PolicyDeleteDialog from './PolicyDeleteDialog' +import CreateFAB from '../lists/CreateFAB' +import PolicyStepCreateDialog from './PolicyStepCreateDialog' +import DetailsPage from '../details/DetailsPage' +import PolicyEditDialog from './PolicyEditDialog' +import { setURLParam } from '../actions/main' +import { 
connect } from 'react-redux' +import { urlParamSelector } from '../selectors' +import { resetURLParams } from '../actions' + +const query = gql` + query($id: ID!) { + escalationPolicy(id: $id) { + id + name + description + } + } +` + +@connect( + state => ({ + createStep: urlParamSelector(state)('createStep'), + }), + dispatch => ({ + setCreateStep: value => dispatch(setURLParam('createStep', value, false)), + resetCreateStep: () => dispatch(resetURLParams('createStep')), + }), +) +export default class PolicyDetails extends PureComponent { + static propTypes = { + escalationPolicyID: p.string.isRequired, + } + + state = { + delete: false, + edit: false, + } + + renderData = ({ data }) => { + return ( + + + this.setState({ edit: true }), + }, + { + label: 'Delete Escalation Policy', + onClick: () => this.setState({ delete: true }), + }, + ]} + /> + + + } + /> + this.props.setCreateStep(true)} /> + {this.props.createStep && ( + + )} + {this.state.edit && ( + this.setState({ edit: false })} + /> + )} + {this.state.delete && ( + this.setState({ delete: false })} + /> + )} + + ) + } + + render() { + return ( + + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyEditDialog.js b/web/src/app/escalation-policies/PolicyEditDialog.js new file mode 100644 index 0000000000..5c78877f6f --- /dev/null +++ b/web/src/app/escalation-policies/PolicyEditDialog.js @@ -0,0 +1,113 @@ +import React, { PureComponent } from 'react' +import p from 'prop-types' +import { graphql2Client } from '../apollo' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { fieldErrors, nonFieldErrors } from '../util/errutil' +import Query from '../util/Query' +import FormDialog from '../dialogs/FormDialog' +import PolicyForm from './PolicyForm' + +const query = gql` + query($id: ID!) { + escalationPolicy(id: $id) { + id + name + description + repeat + } + } +` + +const mutation = gql` + mutation($input: UpdateEscalationPolicyInput!) 
{ + updateEscalationPolicy(input: $input) + } +` + +export default class PolicyEditDialog extends PureComponent { + static propTypes = { + escalationPolicyID: p.string.isRequired, + onClose: p.func, + } + + state = { + value: null, + errors: [], + } + + renderMutation = defaultValue => { + return ( + [ + { + query, + variables: { id: this.props.escalationPolicyID }, + }, + ]} + > + {(commit, status) => this.renderDialog(defaultValue, commit, status)} + + ) + } + + renderDialog(defaultValue, commit, status) { + const { loading, error } = status + const { value } = this.state + const fieldErrs = fieldErrors(error) + + return ( + { + return commit({ + variables: { + input: { + id: this.props.escalationPolicyID, + name: (value && value.name) || defaultValue.name, + description: + (value && value.description) || defaultValue.description, + repeat: + (value && value.repeat.value) || defaultValue.repeat.value, + }, + }, + }) + }} + form={ + this.setState({ value })} + /> + } + /> + ) + } + + render() { + return ( + { + const { id, name, description, repeat } = data.escalationPolicy || {} + return this.renderMutation({ + id, + name, + description, + repeat: { label: repeat.toString(), value: repeat.toString() }, + }) + }} + /> + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyForm.js b/web/src/app/escalation-policies/PolicyForm.js new file mode 100644 index 0000000000..80d6d563c4 --- /dev/null +++ b/web/src/app/escalation-policies/PolicyForm.js @@ -0,0 +1,83 @@ +import React, { PureComponent } from 'react' +import { PropTypes as p } from 'prop-types' +import Grid from '@material-ui/core/Grid' +import TextField from '@material-ui/core/TextField' +import { FormContainer, FormField } from '../forms' +import MaterialSelect from '../selection/MaterialSelect' + +export default class PolicyForm extends PureComponent { + static propTypes = { + value: p.shape({ + name: p.string, + description: p.string, + repeat: p.shape({ + label: p.string.isRequired, + value: 
p.string.isRequired, + }).isRequired, + }).isRequired, + + errors: p.arrayOf( + p.shape({ + field: p.oneOf(['name', 'description', 'repeat']).isRequired, + message: p.string.isRequired, + }), + ), + + disabled: p.bool, + onChange: p.func, + } + + render() { + return ( + + + + + + + + + + + + + + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyRouter.js b/web/src/app/escalation-policies/PolicyRouter.js new file mode 100644 index 0000000000..c71508f847 --- /dev/null +++ b/web/src/app/escalation-policies/PolicyRouter.js @@ -0,0 +1,65 @@ +import React, { PureComponent } from 'react' +import gql from 'graphql-tag' +import { Switch, Route } from 'react-router-dom' +import PolicyCreateDialog from './PolicyCreateDialog' +import PolicyDetails from './PolicyDetails' +import PolicyServicesQuery from './PolicyServicesQuery' +import { PageNotFound } from '../error-pages/Errors' +import SimpleListPage from '../lists/SimpleListPage' + +const query = gql` + query epsQuery($input: EscalationPolicySearchOptions) { + data: escalationPolicies(input: $input) { + nodes { + id + name + description + } + pageInfo { + hasNextPage + endCursor + } + } + } +` + +export default class PolicyRouter extends PureComponent { + renderList = () => ( + ({ + title: n.name, + subText: n.description, + url: n.id, + })} + createForm={} + /> + ) + + renderDetails = ({ match }) => ( + + ) + + renderServices = ({ match }) => ( + + ) + + render() { + return ( + + + + + + + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyServicesCard.js b/web/src/app/escalation-policies/PolicyServicesCard.js new file mode 100644 index 0000000000..1a7b7f50a0 --- /dev/null +++ b/web/src/app/escalation-policies/PolicyServicesCard.js @@ -0,0 +1,41 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import Card from '@material-ui/core/Card' +import withStyles from '@material-ui/core/styles/withStyles' +import FlatList from '../lists/FlatList' + +const styles = { + card: 
{ + width: '100%', + }, +} + +@withStyles(styles) +export default class PolicyServicesCard extends Component { + static propTypes = { + services: p.arrayOf( + p.shape({ + id: p.string.isRequired, + name: p.string.isRequired, + }), + ).isRequired, + } + + getServicesItems = () => { + return this.props.services.map(service => ({ + title: service.name, + url: `/services/${service.id}`, + })) + } + + render() { + return ( + + + + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyServicesQuery.js b/web/src/app/escalation-policies/PolicyServicesQuery.js new file mode 100644 index 0000000000..557a587bee --- /dev/null +++ b/web/src/app/escalation-policies/PolicyServicesQuery.js @@ -0,0 +1,37 @@ +import React, { PureComponent } from 'react' +import { PropTypes as p } from 'prop-types' +import Query from '../util/Query' +import gql from 'graphql-tag' +import PolicyServicesCard from './PolicyServicesCard' + +const query = gql` + query($id: ID!) { + escalationPolicy(id: $id) { + id + assignedTo { + id + name + } + } + } +` + +export default class PolicyServicesQuery extends PureComponent { + static propTypes = { + escalationPolicyID: p.string.isRequired, + } + + render() { + return ( + ( + + )} + variables={{ id: this.props.escalationPolicyID }} + /> + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyStep.js b/web/src/app/escalation-policies/PolicyStep.js new file mode 100644 index 0000000000..fc4aefd96a --- /dev/null +++ b/web/src/app/escalation-policies/PolicyStep.js @@ -0,0 +1,207 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import Chip from '@material-ui/core/Chip' +import Grid from '@material-ui/core/Grid' +import ListItem from '@material-ui/core/ListItem' +import ListItemSecondaryAction from '@material-ui/core/ListItemSecondaryAction' +import Typography from '@material-ui/core/Typography' +import { sortBy } from 'lodash-es' +import { withStyles } from '@material-ui/core/styles' +import { RotationChip, 
ScheduleChip, UserChip, SlackChip } from '../util/Chips' +import PolicyStepEditDialog from './PolicyStepEditDialog' +import PolicyStepDeleteDialog from './PolicyStepDeleteDialog' +import OtherActions from '../util/OtherActions' +import { setURLParam } from '../actions/main' +import { connect } from 'react-redux' +import { urlParamSelector } from '../selectors' +import { resetURLParams } from '../actions' + +const shapeStep = p.shape({ + id: p.string.isRequired, + delayMinutes: p.number.isRequired, + targets: p.arrayOf( + p.shape({ + id: p.string.isRequired, + name: p.string.isRequired, + type: p.string.isRequired, + }), + ).isRequired, +}) + +const styles = { + centerFlex: { + display: 'flex', + alignItems: 'center', + height: 'fit-content', + }, +} + +@connect( + state => ({ + editStep: urlParamSelector(state)('editStep'), + }), + dispatch => ({ + setEditStep: value => dispatch(setURLParam('editStep', value)), + resetEditStep: () => dispatch(resetURLParams('editStep')), + }), +) +@withStyles(styles) +export default class PolicyStep extends Component { + static propTypes = { + escalationPolicyID: p.string.isRequired, + repeat: p.number.isRequired, // # of times EP repeats escalation process + step: shapeStep.isRequired, + steps: p.arrayOf(shapeStep).isRequired, + } + + state = { + delete: false, + } + + getStepNumber = sid => { + const sids = this.props.steps.map(s => s.id) + return sids.indexOf(sid) + 1 + } + + /* + * Renders the mui chips for each target on the step + */ + renderChips = () => { + const { targets: _t } = this.props.step + + // copy and sort by type then name + const targets = sortBy(_t.slice(), ['type', 'name']) + + if (!targets || targets.length === 0) { + return + } + + const items = targets.map(tgt => { + const tgtChip = Chip => + + let chip = null + switch (tgt.type) { + case 'user': + chip = tgtChip(UserChip) + break + case 'schedule': + chip = tgtChip(ScheduleChip) + break + case 'rotation': + chip = tgtChip(RotationChip) + break + case 
'slackChannel': + case 'notificationChannel': + chip = tgtChip(SlackChip) + break + } + + if (chip) { + return ( + + {chip} + + ) + } + }) + + return ( + + {items} + + ) + } + + /* + * Renders the delay message, dependent on if the escalation policy + * repeats, and if the message is rendering on the last step + */ + renderDelayMessage = () => { + const { repeat, step, steps } = this.props + const len = steps.length + const isLastStep = this.getStepNumber(step.id) === len + + // if it's the last step and should not repeat, do not render end text + if (isLastStep && repeat === 0) { + return null + } + + const pluralizer = x => (x === 1 ? '' : 's') + + let repeatText = `Move on to step #${this.getStepNumber(step.id) + + 1} after ${step.delayMinutes} minute${pluralizer(step.delayMinutes)}` + + if (isLastStep && this.getStepNumber(step.id) === 1) { + repeatText = `Repeat after ${step.delayMinutes} minutes` + } + + // repeats + if (isLastStep) { + repeatText = `Go back to step #1 after ${ + step.delayMinutes + } minute${pluralizer(step.delayMinutes)}` + } + + return {repeatText} + } + + render() { + const { + classes, + editStep, + index, + resetEditStep, + setEditStep, + step, + } = this.props + + return ( + + + + + + Step #{this.getStepNumber(step.id)}: + + + + {this.renderChips()} + + + {this.renderDelayMessage()} + + + + setEditStep(step.id), + }, + { + label: 'Delete', + onClick: () => this.setState({ delete: true }), + }, + ]} + positionRelative + /> + + + {editStep === step.id && ( + + )} + {this.state.delete && ( + this.setState({ delete: false })} + stepID={this.props.step.id} + /> + )} + + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyStepCreateDialog.js b/web/src/app/escalation-policies/PolicyStepCreateDialog.js new file mode 100644 index 0000000000..22c0a2411c --- /dev/null +++ b/web/src/app/escalation-policies/PolicyStepCreateDialog.js @@ -0,0 +1,131 @@ +import React from 'react' +import { PropTypes as p } from 'prop-types' +import { 
graphql2Client } from '../apollo' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { fieldErrors, nonFieldErrors } from '../util/errutil' +import PolicyStepForm from './PolicyStepForm' +import FormDialog from '../dialogs/FormDialog' +import { resetURLParams } from '../actions' +import { urlParamSelector } from '../selectors' +import { connect } from 'react-redux' + +const mutation = gql` + mutation($input: CreateEscalationPolicyStepInput!) { + createEscalationPolicyStep(input: $input) { + id + delayMinutes + targets { + id + name + type + } + } + } +` + +const refetchQuery = gql` + query($id: ID!) { + escalationPolicy(id: $id) { + id + steps { + id + delayMinutes + targets { + id + name + type + } + } + } + } +` + +@connect( + state => ({ + errorMessage: urlParamSelector(state)('errorMessage'), + errorTitle: urlParamSelector(state)('errorTitle'), + }), + dispatch => ({ + resetError: () => dispatch(resetURLParams('errorMessage', 'errorTitle')), + }), +) +export default class PolicyStepCreateDialog extends React.Component { + static propTypes = { + escalationPolicyID: p.string.isRequired, + onClose: p.func.isRequired, + } + + state = { + value: null, + errors: [], + } + + renderDialog(defaultValue, commit, status) { + const { errorMessage, errorTitle } = this.props + const { value } = this.state + const { loading, error } = status + const fieldErrs = fieldErrors(error) + + // don't render dialog if slack redirect returns with an error + if (Boolean(errorMessage) || Boolean(errorTitle)) { + return null + } + + return ( + { + return commit({ + variables: { + input: { + escalationPolicyID: this.props.escalationPolicyID, + delayMinutes: parseInt( + (value && value.delayMinutes) || defaultValue.delayMinutes, + ), + targets: (value && value.targets) || defaultValue.targets, + }, + }, + }).then(() => this.props.onClose()) + }} + form={ + this.setState({ value })} + /> + } + /> + ) + } + + render() { + const defaultValue = { + targets: [], + 
delayMinutes: '15', + } + + return ( + [ + { + query: refetchQuery, + variables: { + id: this.props.escalationPolicyID, + }, + }, + ]} + > + {(commit, status) => this.renderDialog(defaultValue, commit, status)} + + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyStepDeleteDialog.js b/web/src/app/escalation-policies/PolicyStepDeleteDialog.js new file mode 100644 index 0000000000..32438d53eb --- /dev/null +++ b/web/src/app/escalation-policies/PolicyStepDeleteDialog.js @@ -0,0 +1,108 @@ +import React from 'react' +import p from 'prop-types' +import { graphql2Client } from '../apollo' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { nonFieldErrors } from '../util/errutil' +import Query from '../util/Query' +import FormDialog from '../dialogs/FormDialog' + +const query = gql` + query($id: ID!) { + escalationPolicy(id: $id) { + id + steps { + id + } + } + } +` + +const mutation = gql` + mutation($input: UpdateEscalationPolicyInput!) { + updateEscalationPolicy(input: $input) + } +` + +export default class PolicyStepDeleteDialog extends React.PureComponent { + static propTypes = { + escalationPolicyID: p.string.isRequired, + stepID: p.string.isRequired, + onClose: p.func, + } + + renderMutation(data) { + return ( + { + const { escalationPolicy } = cache.readQuery({ + query, + variables: { id: this.props.escalationPolicyID }, + }) + cache.writeQuery({ + query, + variables: { id: data.serviceID }, + data: { + escalationPolicy: { + ...escalationPolicy, + steps: (escalationPolicy.steps || []).filter( + step => step.id !== this.props.stepID, + ), + }, + }, + }) + }} + > + {(commit, status) => this.renderDialog(data, commit, status)} + + ) + } + + renderDialog(data, commit, mutStatus) { + const { loading, error } = mutStatus + + // get array of step ids without the step to delete + let sids = data.steps.map(s => s.id) + const toDel = sids.indexOf(this.props.stepID) + sids.splice(toDel, 1) + + return ( + 
s.id).indexOf(this.props.stepID) + 1) + + ' on this escalation policy.' + } + loading={loading} + errors={nonFieldErrors(error)} + onClose={this.props.onClose} + onSubmit={() => { + return commit({ + variables: { + input: { + id: data.id, + stepIDs: sids, + }, + }, + }) + }} + /> + ) + } + + render() { + return ( + this.renderMutation(data.escalationPolicy)} + /> + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyStepEditDialog.js b/web/src/app/escalation-policies/PolicyStepEditDialog.js new file mode 100644 index 0000000000..de696b6f95 --- /dev/null +++ b/web/src/app/escalation-policies/PolicyStepEditDialog.js @@ -0,0 +1,111 @@ +import React from 'react' +import { PropTypes as p } from 'prop-types' +import { graphql2Client } from '../apollo' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { fieldErrors, nonFieldErrors } from '../util/errutil' +import PolicyStepForm from './PolicyStepForm' +import FormDialog from '../dialogs/FormDialog' +import { resetURLParams } from '../actions' +import { urlParamSelector } from '../selectors' +import { connect } from 'react-redux' + +const mutation = gql` + mutation($input: UpdateEscalationPolicyStepInput!) 
{ + updateEscalationPolicyStep(input: $input) + } +` + +@connect( + state => ({ + errorMessage: urlParamSelector(state)('errorMessage'), + errorTitle: urlParamSelector(state)('errorTitle'), + }), + dispatch => ({ + resetError: () => dispatch(resetURLParams('errorMessage', 'errorTitle')), + }), +) +export default class PolicyStepEditDialog extends React.Component { + static propTypes = { + escalationPolicyID: p.string.isRequired, + onClose: p.func.isRequired, + step: p.shape({ + id: p.string.isRequired, + // number from backend, string from textField + delayMinutes: p.oneOfType([p.number, p.string]).isRequired, + targets: p.arrayOf( + p.shape({ + id: p.string.isRequired, + name: p.string.isRequired, + type: p.string.isRequired, + }), + ).isRequired, + }), + } + + state = { + value: null, + errors: [], + } + + renderDialog(defaultValue, commit, status) { + const { errorMessage, errorTitle } = this.props + const { value } = this.state + const { loading, error } = status + const fieldErrs = fieldErrors(error) + + // don't render dialog if slack redirect returns with an error + if (Boolean(errorMessage) || Boolean(errorTitle)) { + return null + } + + return ( + + commit({ + variables: { + input: { + id: this.props.step.id, + delayMinutes: + (value && value.delayMinutes) || defaultValue.delayMinutes, + targets: (value && value.targets) || defaultValue.targets, + }, + }, + }) + } + form={ + this.setState({ value })} + /> + } + /> + ) + } + + render() { + const defaultValue = { + targets: this.props.step.targets.map(({ id, type }) => ({ id, type })), + delayMinutes: this.props.step.delayMinutes.toString(), + } + + return ( + ['stepsQuery']} + onCompleted={this.props.onClose} + > + {(commit, status) => this.renderDialog(defaultValue, commit, status)} + + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyStepForm.js b/web/src/app/escalation-policies/PolicyStepForm.js new file mode 100644 index 0000000000..f8fa2933c2 --- /dev/null +++ 
b/web/src/app/escalation-policies/PolicyStepForm.js @@ -0,0 +1,272 @@ +import React from 'react' +import { PropTypes as p } from 'prop-types' +import { FormContainer, FormField } from '../forms' +import Badge from '@material-ui/core/Badge' +import Grid from '@material-ui/core/Grid' +import Stepper from '@material-ui/core/Stepper' +import Step from '@material-ui/core/Step' +import StepButton from '@material-ui/core/StepButton' +import StepContent from '@material-ui/core/StepContent' +import TextField from '@material-ui/core/TextField' +import Typography from '@material-ui/core/Typography' +import withStyles from '@material-ui/core/styles/withStyles' +import { + RotationSelect, + ScheduleSelect, + SlackChannelSelect, + UserSelect, +} from '../selection' + +import { + RotateRight as RotationsIcon, + Today as SchedulesIcon, + Group as UsersIcon, +} from '@material-ui/icons' +import { SlackBW as SlackIcon } from '../icons/components/Icons' +import { Config } from '../util/RequireConfig' + +const styles = { + badge: { + top: -1, + right: -1, + backgroundColor: '#cd1831', + }, + optional: { + textAlign: 'left', + }, + label: { + paddingRight: '0.4em', + }, + stepperRoot: { + padding: 0, + }, +} + +@withStyles(styles) +export default class PolicyStepForm extends React.Component { + static propTypes = { + value: p.shape({ + targets: p.arrayOf( + p.shape({ id: p.string.isRequired, type: p.string.isRequired }), + ), + delayMinutes: p.string.isRequired, + }).isRequired, + + errors: p.arrayOf( + p.shape({ + field: p.oneOf(['targets', 'delayMinutes']).isRequired, + message: p.string.isRequired, + }), + ), + + disabled: p.bool, + onChange: p.func, + } + + state = { + step: 0, + } + + handleStepChange = step => () => { + if (step === this.state.step) { + this.setState({ step: null }) // close + } else { + this.setState({ step }) // open + } + } + + render() { + const { classes, disabled, value } = this.props + const { step } = this.state + + // takes a list of { id, type } targets 
and return the ids for a specific type + const getTargetsByType = type => tgts => + tgts + .filter(t => t.type === type) // only the list of the current type + .map(t => t.id) // array of ID strings + + // takes a list of ids and return a list of { id, type } concatted with the new set of specific types + const makeSetTargetType = curTgts => type => newTgts => + curTgts + .filter(t => t.type !== type) // current targets without any of the current type + .concat(newTgts.map(id => ({ id, type }))) // add the list of current type to the end + + // then form fields would all point to `targets` but can map values + const setTargetType = makeSetTargetType(value.targets) + + const badgeMeUpScotty = (len, txt) => ( + + {txt} + + ) + + const optionalText = ( + + Optional + + ) + + return ( + + + + + {cfg => ( + + + } + optional={optionalText} + onClick={this.handleStepChange(0)} + > + {badgeMeUpScotty( + getTargetsByType('rotation')(value.targets).length, + 'Add Rotations', + )} + + + + + + + } + optional={optionalText} + onClick={this.handleStepChange(1)} + > + {badgeMeUpScotty( + getTargetsByType('schedule')(value.targets).length, + 'Add Schedules', + )} + + + + + + {cfg['Slack.Enable'] && ( + + } + optional={optionalText} + onClick={this.handleStepChange(2)} + > + {badgeMeUpScotty( + getTargetsByType('slackChannel')(value.targets) + .length, + 'Add Slack Channels', + )} + + + + + + )} + + } + optional={optionalText} + onClick={this.handleStepChange(3)} + > + {badgeMeUpScotty( + getTargetsByType('user')(value.targets).length, + 'Add Users', + )} + + + + + + + )} + + + + value.toString()} + hint={ + value.delayMinutes === '0' + ? 
'This will cause the step to immediately escalate' + : `This will cause the step to escalate after ${ + value.delayMinutes + }m` + } + /> + + + + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyStepsCard.js b/web/src/app/escalation-policies/PolicyStepsCard.js new file mode 100644 index 0000000000..d0c6d105f5 --- /dev/null +++ b/web/src/app/escalation-policies/PolicyStepsCard.js @@ -0,0 +1,280 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import Card from '@material-ui/core/Card' +import CardContent from '@material-ui/core/CardContent' +import Dialog from '@material-ui/core/Dialog' +import List from '@material-ui/core/List' +import Typography from '@material-ui/core/Typography' +import { withStyles } from '@material-ui/core/styles/index' +import withWidth, { isWidthDown } from '@material-ui/core/withWidth' +import { DragDropContext, Droppable, Draggable } from 'react-beautiful-dnd' +import { styles as globalStyles } from '../styles/materialStyles' +import gql from 'graphql-tag' +import PolicyStep from './PolicyStep' +import { Mutation } from 'react-apollo' +import { graphql2Client } from '../apollo' +import DialogTitleWrapper from '../dialogs/components/DialogTitleWrapper' +import DialogContentError from '../dialogs/components/DialogContentError' +import { policyStepsQuery } from './PolicyStepsQuery' + +const styles = theme => { + const { dndDragging } = globalStyles(theme) + + return { + dndDragging, + paddingTop: { + paddingTop: '1em', + }, + } +} + +@withWidth() +@withStyles(styles) +export default class PolicyStepsCard extends Component { + static propTypes = { + escalationPolicyID: p.string.isRequired, + repeat: p.number.isRequired, // # of times EP repeats escalation process + steps: p.arrayOf( + p.shape({ + id: p.string.isRequired, + delayMinutes: p.number.isRequired, + targets: p.arrayOf( + p.shape({ + id: p.string.isRequired, + name: p.string.isRequired, + type: p.string.isRequired, + }), + 
).isRequired, + }), + ).isRequired, + } + + state = { + error: null, + } + + oldID = null + oldIdx = null + newIdx = null + + arrayMove = arr => { + const el = arr[this.oldIdx] + arr.splice(this.oldIdx, 1) + arr.splice(this.newIdx, 0, el) + } + + onMutationUpdate = (cache, data) => { + // mutation returns true on a success + if ( + !data.updateEscalationPolicy || + this.oldIdx == null || + this.newIdx == null + ) { + return + } + + // variables for query to read/write from the cache + const variables = { + id: this.props.escalationPolicyID, + } + + // get the current state of the steps in the cache + const { escalationPolicy } = cache.readQuery({ + query: policyStepsQuery, + variables, + }) + + // get steps from cache + const steps = escalationPolicy.steps.slice() + + // if optimistic cache update was successful, return out + if (steps[this.newIdx].id === this.oldID) return + + // re-order escalationPolicy.steps array + this.arrayMove(steps) + + // write new steps order to cache + cache.writeQuery({ + query: policyStepsQuery, + variables, + data: { + escalationPolicy: { + ...escalationPolicy, + steps, + }, + }, + }) + } + + onDragStart = () => { + // adds a little vibration if the browser supports it + if (window.navigator.vibrate) { + window.navigator.vibrate(100) + } + } + + // update step order on ui and send out mutation + onDragEnd = (result, mutation) => { + // dropped outside the list + if (!result.destination) { + return + } + + // map ids to swap elements + let sids = this.props.steps.map(s => s.id) + this.oldID = result.draggableId + this.oldIdx = sids.indexOf(this.oldID) + this.newIdx = result.destination.index + + // re-order sids array + this.arrayMove(sids) + + mutation({ + variables: { + input: { + id: this.props.escalationPolicyID, + stepIDs: sids, + }, + }, + }) + } + + renderRepeatText = () => { + const { repeat, steps } = this.props + + if (!steps.length) { + return null + } + + let text = '' + if (repeat === 0) text = 'Do not repeat' + else if 
(repeat === 1) text = 'Repeat once' + else text = `Repeat ${repeat} times` + + return {text} + } + + renderNoSteps = () => { + return ( + + No steps currently on this Escalation Policy + + ) + } + + /* + * Renders the steps list with the drag and drop context + * + * Each step will have a grid containing the step number, + * targets (rendered as mui chips), and the delay length + * until the next escalation. + */ + renderStepsList = () => { + const { classes, escalationPolicyID, repeat, steps } = this.props + + if (!steps.length) { + return this.renderNoSteps() + } + + return ( + + Notify the following: + { + this.oldID = null + this.oldIdx = null + this.newIdx = null + }} + optimisticResponse={{ + updateEscalationPolicy: true, + }} + onError={error => this.setState({ error })} + update={(cache, { data }) => this.onMutationUpdate(cache, data)} + > + {mutation => ( + this.onDragEnd(res, mutation)} + > + + {(provided, _) => ( +
+ + {steps.map((step, index) => ( + + {(provided, snapshot) => { + // light grey background while dragging + const draggingBackground = snapshot.isDragging + ? classes.dndDragging + : null + + return ( +
+ +
+ ) + }} +
+ ))} + {provided.placeholder} +
+
+ )} +
+
+ )} +
+
+ ) + } + + render() { + const { message: error } = this.state.error || {} + + return ( + + + + Escalation Steps + {this.renderStepsList()} + {this.renderRepeatText()} + + + this.setState({ error: null })} + > + + + + + ) + } +} diff --git a/web/src/app/escalation-policies/PolicyStepsQuery.js b/web/src/app/escalation-policies/PolicyStepsQuery.js new file mode 100644 index 0000000000..c9f0521a0b --- /dev/null +++ b/web/src/app/escalation-policies/PolicyStepsQuery.js @@ -0,0 +1,47 @@ +import React, { PureComponent } from 'react' +import { PropTypes as p } from 'prop-types' +import Query from '../util/Query' +import gql from 'graphql-tag' +import PolicyStepsCard from './PolicyStepsCard' + +export const policyStepsQuery = gql` + query stepsQuery($id: ID!) { + escalationPolicy(id: $id) { + id + repeat + steps { + id + delayMinutes + targets { + id + name + type + } + } + } + } +` + +export default class PolicyStepsQuery extends PureComponent { + static propTypes = { + escalationPolicyID: p.string.isRequired, + } + + render() { + return ( + { + return ( + + ) + }} + variables={{ id: this.props.escalationPolicyID }} + /> + ) + } +} diff --git a/web/src/app/forms/Form.js b/web/src/app/forms/Form.js new file mode 100644 index 0000000000..ad6a89e520 --- /dev/null +++ b/web/src/app/forms/Form.js @@ -0,0 +1,50 @@ +import React from 'react' +import p from 'prop-types' +import { FormContext } from './context' + +/* + * Form will render a form element and wrap the onSubmit handler + * to check validation on any nested FormContainers rendered as + * descendants. + * + * onSubmit (if provided) will be called with a second `isValid` argument. 
+ */ +export class Form extends React.PureComponent { + static propTypes = { + onSubmit: p.func, + disabled: p.bool, + } + + _checks = [] + + onSubmit = e => { + const valid = !this._checks.some(f => !f()) + return this.props.onSubmit(e, valid) + } + + addSubmitCheck = checkFn => { + this._checks.push(checkFn) + + // return function to un-register it + return () => { + this._checks = this._checks.filter(fn => fn !== checkFn) + } + } + + render() { + const { onSubmit, disabled, ...formProps } = this.props + + return ( +
+ + {this.props.children} + +
+ ) + } +} diff --git a/web/src/app/forms/FormContainer.js b/web/src/app/forms/FormContainer.js new file mode 100644 index 0000000000..0ed77b11ad --- /dev/null +++ b/web/src/app/forms/FormContainer.js @@ -0,0 +1,136 @@ +import React from 'react' +import p from 'prop-types' +import MountWatcher from '../util/MountWatcher' + +import { FormContext, FormContainerContext } from './context' +import { get, set } from 'lodash-es' + +// FormContainer handles grouping multiple FormFields. +// It works with the Form component to handle validation. +export class FormContainer extends React.PureComponent { + static propTypes = { + value: p.object, + errors: p.arrayOf( + p.shape({ + field: p.string.isRequired, + message: p.string.isRequired, + }), + ), + onChange: p.func, + disabled: p.bool, + + mapValue: p.func, + mapOnChangeValue: p.func, + + // If true, will render optional fields with `(optional)` appended to the label. + // In addition, required fields will not be appended with `*`. + optionalLabels: p.bool, + } + + static defaultProps = { + errors: [], + value: {}, + onChange: () => {}, + + mapValue: value => value, + mapOnChangeValue: value => value, + } + + state = { + validationErrors: [], + } + + _fields = {} + + addField = (fieldName, validate) => { + if (!this._fields[fieldName]) { + this._fields[fieldName] = [] + } + this._fields[fieldName].push(validate) + + return () => { + this._fields[fieldName] = this._fields[fieldName].filter( + v => v !== validate, + ) + if (this._fields[fieldName].length === 0) { + delete this._fields[fieldName] + } + } + } + + onSubmit = (...args) => { + const validate = field => { + let err + // find first error + this._fields[field].find(validate => { + err = validate(get(this.props.value, field)) + return err + }) + if (err) err.field = field + return err + } + const validationErrors = Object.keys(this._fields) + .map(validate) + .filter(e => e) + this.setState({ validationErrors }) + if (validationErrors.length) return false + + return 
true + } + + onChange = (fieldName, e) => { + let value = e + if (e && e.target) { + value = e.target.value + } + const { value: oldValue, mapValue, mapOnChangeValue } = this.props + this.props.onChange( + mapOnChangeValue( + set( + mapValue({ + ...oldValue, + }), + fieldName, + value, + ), + ), + ) + } + + render() { + return {this.renderComponent} + } + + renderComponent = ({ disabled: formDisabled, addSubmitCheck }) => { + const { + value, + mapValue, + optionalLabels, + disabled: containerDisabled, + } = this.props + + return ( + { + this._unregister = addSubmitCheck(this.onSubmit) + }} + onUnmount={() => { + this._unregister() + }} + > + + {this.props.children} + + + ) + } +} diff --git a/web/src/app/forms/FormField.js b/web/src/app/forms/FormField.js new file mode 100644 index 0000000000..1fa807c00e --- /dev/null +++ b/web/src/app/forms/FormField.js @@ -0,0 +1,196 @@ +import React from 'react' +import p from 'prop-types' +import MountWatcher from '../util/MountWatcher' +import FormControl from '@material-ui/core/FormControl' +import FormHelperText from '@material-ui/core/FormHelperText' +import FormLabel from '@material-ui/core/FormLabel' +import { get, isEmpty, startCase } from 'lodash-es' + +import { FormContainerContext } from './context' + +export class FormField extends React.PureComponent { + static propTypes = { + // one of component or render must be provided + component: p.any, + render: p.func, + + // mapValue can be used to map a value before it's passed to the form component + mapValue: p.func, + + // mapOnChangeValue can be used to map a changed value from the component, before it's + // passed to the parent form's state. + mapOnChangeValue: p.func, + + // Adjusts props for usage with a Checkbox component. + checkbox: p.bool, + + // fieldName specifies the field used for + // checking errors, change handlers, and value. + // + // If unset, it defaults to `name`. 
+ name: p.string.isRequired, + fieldName: p.string, + + // used if name is set, + // but the error name is different from graphql responses + errorName: p.string, + + // label above form component + label: p.node, + formLabel: p.bool, // use formLabel instead of label if true + + // required indicates the field may not be left blank. + required: p.bool, + + // validate can be used to provide client-side validation of a + // field. + validate: p.func, + + // a hint for the user on a form field. errors take priority + hint: p.string, + + // disable the form helper text for errors. + noError: p.bool, + } + + static defaultProps = { + validate: () => {}, + mapValue: value => value, + mapOnChangeValue: value => value, + } + + validate = value => { + if ( + this.props.required && + !['boolean', 'number'].includes(typeof value) && + isEmpty(value) + ) { + return new Error('Required field.') + } + + return this.props.validate(value) + } + + render() { + return ( + + {this.renderComponent} + + ) + } + + renderComponent = ({ + errors, + value, + onChange, + addField, + disabled: containerDisabled, + optionalLabels, + ...otherFormProps + }) => { + const { + errorName, + name, + noError, + component: Component, + render, + fieldName: _fieldName, + formLabel, + required, + validate, + disabled: fieldDisabled, + hint, + label: _label, + InputLabelProps: _inputProps, + mapValue, + mapOnChangeValue, + checkbox, + + ...otherFieldProps + } = this.props + + const InputLabelProps = { + required: required && !optionalLabels, + ..._inputProps, + } + + const baseLabel = typeof _label === 'string' ? _label : startCase(name) + const label = + !required && optionalLabels ? 
baseLabel + ' (optional)' : baseLabel + + const fieldName = _fieldName || name + const props = { + ...otherFormProps, + ...otherFieldProps, + name, + required, + disabled: containerDisabled || fieldDisabled, + error: errors.find(err => err.field === (errorName || fieldName)), + hint, + value: mapValue(get(value, fieldName)), + } + + let getValueOf = e => (e && e.target ? e.target.value : e) + if (checkbox) { + props.checked = props.value + props.value = props.value.toString() + getValueOf = e => e.target.checked + } else if (otherFieldProps.type === 'number') { + props.label = label + props.value = props.value.toString() + props.InputLabelProps = InputLabelProps + getValueOf = e => parseInt(e.target.value, 10) + } else { + props.label = label + props.InputLabelProps = InputLabelProps + } + + props.onChange = value => + onChange(fieldName, mapOnChangeValue(getValueOf(value))) + + return ( + { + this._unregister = addField(fieldName, this.validate) + }} + onUnmount={() => { + this._unregister() + }} + > + {this.renderContent(props)} + + ) + } + + renderContent(props) { + const { + checkbox, + component, + formLabel, + label, + noError, + render, + } = this.props + + if (render) return render(props) + const Component = component + + return ( + + {formLabel && {label}} + + {!noError && (props.error || props.hint) && ( + + {(props.error && + props.error.message.replace(/^./, str => str.toUpperCase())) || + props.hint} + + )} + + ) + } +} diff --git a/web/src/app/forms/README.md b/web/src/app/forms/README.md new file mode 100644 index 0000000000..3d716c0753 --- /dev/null +++ b/web/src/app/forms/README.md @@ -0,0 +1,57 @@ +# Form Components + +Form components handle passing validation and value information to separate 3 main concerns: + +1. Errors and value data/state -- `FormContainer` +2. Individual field layout and validation -- `FormField` +3. 
Overall form validation & submittion -- `Form` + +### FormContainer + +The `FormContainer` component handles a single form segment. It takes a `value` prop as an object, where the keys +map to descendant `FormField` components. Rather than value, error, onChange, etc.. for each FormField, a `FormContainer` will take an `errors` array and a value and matching onChange handler that deal with a single object. + +- If a `FormContainer` is `disabled` all fields within it will be as well. +- A `FormContainer` will run validation if an outer `Form` component fires it's `onSubmit` handler. Validation passes if all nested `FormField` components are valid. + +### FormField + +The `FormField` component will receive `value` and `error` data from a `FormContainer`. It also registers a `validate` +function, if provided. The field name (used for error checking and the container's value prop) defaults to `name` but can be overriden with the `fieldName` prop. + +### Form + +The `Form` component's job is to simply report when it has been submitted and indicate whether or not all `FormContainer` components pass validation. + +## Basic Usage + +The following exapmle will only call `doMutation` if the user enters 1 or more digits +into the `Foo` text field. + +If the form is submitted with no value, the user will be directed to enter a value. +If the form is submitted with non-digits, an error will be displayed in the form field "Only numbers allowed.". + +```js +
{ + e.preventDefault() + // isValid will be true if all FormContainers report no validation errors + if (isValid) doMutation() + }} +> + + + /^\d+$/.test(value) ? null : new Error('Only numbers allowed.') + } + /> + + // ... Multiple FormContainers can be placed in the same form (e.g. SetupWizard) +
+``` diff --git a/web/src/app/forms/context.js b/web/src/app/forms/context.js new file mode 100644 index 0000000000..3303332883 --- /dev/null +++ b/web/src/app/forms/context.js @@ -0,0 +1,16 @@ +import React from 'react' + +export const FormContainerContext = React.createContext({ + onChange: (field, value) => {}, + disabled: false, + errors: [], + value: {}, + addField: () => () => {}, +}) +FormContainerContext.displayName = 'FormContainerContext' + +export const FormContext = React.createContext({ + disabled: false, + addSubmitCheck: () => () => {}, +}) +FormContext.displayName = 'FormContext' diff --git a/web/src/app/forms/index.js b/web/src/app/forms/index.js new file mode 100644 index 0000000000..b923dfe969 --- /dev/null +++ b/web/src/app/forms/index.js @@ -0,0 +1,3 @@ +export * from './Form' +export * from './FormContainer' +export * from './FormField' diff --git a/web/src/app/history.js b/web/src/app/history.js new file mode 100644 index 0000000000..e5987c4268 --- /dev/null +++ b/web/src/app/history.js @@ -0,0 +1,3 @@ +import { createBrowserHistory } from 'history' + +export default createBrowserHistory() diff --git a/web/src/app/icons/components/Icons.js b/web/src/app/icons/components/Icons.js new file mode 100644 index 0000000000..6efb6ce209 --- /dev/null +++ b/web/src/app/icons/components/Icons.js @@ -0,0 +1,82 @@ +import React, { Component } from 'react' +import Tooltip from '@material-ui/core/Tooltip' +import withStyles from '@material-ui/core/styles/withStyles' +import AddIcon from '@material-ui/icons/Add' +import TrashIcon from '@material-ui/icons/Delete' +import WarningIcon from '@material-ui/icons/Warning' +import except from 'except' +import { styles } from '../../styles/materialStyles' +import slackIcon from '../../public/slack.svg' +import slackIconBW from '../../public/slack_monochrome_black.svg' + +@withStyles(styles) +export class Trash extends Component { + render() { + const { classes } = this.props + + return ( + + ) + } +} + 
+@withStyles(styles) +export class Warning extends Component { + render() { + const { classes, details } = this.props + + const warningIcon = ( + + ) + + if (!details) { + return warningIcon + } + + return ( + + {warningIcon} + + ) + } +} + +@withStyles(styles) +export class Add extends Component { + render() { + return + } +} + +@withStyles(styles) +export class Slack extends Component { + render() { + return ( + + ) + } +} + +export class SlackBW extends Component { + render() { + return ( + + ) + } +} diff --git a/web/src/app/icons/index.js b/web/src/app/icons/index.js new file mode 100644 index 0000000000..691bcc8b4a --- /dev/null +++ b/web/src/app/icons/index.js @@ -0,0 +1 @@ +export * from './components/Icons' diff --git a/web/src/app/index.js b/web/src/app/index.js new file mode 100644 index 0000000000..2b61056c3f --- /dev/null +++ b/web/src/app/index.js @@ -0,0 +1,63 @@ +/// #if HMR +import './rhl' +/// #endif + +import React from 'react' +import ReactDOM from 'react-dom' +import { Provider as ReduxProvider } from 'react-redux' +import { ConnectedRouter } from 'connected-react-router' +import { ApolloProvider } from 'react-apollo' +import MuiThemeProvider from '@material-ui/core/styles/MuiThemeProvider' +import { theme } from './mui' +import { graphql1Client } from './apollo' +import './styles' +import App from './main/NewApp' +import MuiPickersUtilsProvider from './mui-pickers' +import history from './history' +import store from './reduxStore' +import { GracefulUnmounterProvider } from './util/gracefulUnmount' +import GA from './util/GoogleAnalytics' +import { Config, ConfigProvider } from './util/RequireConfig' + +const LazyGARouteTracker = React.memo(props => { + if (!props.trackingID) { + return null + } + + const GAOptions = { + titleCase: true, + debug: false, + } + + if (!GA.init(props.trackingID, GAOptions)) { + return null + } + + return +}) + +ReactDOM.render( + + + + + + + + {config => ( + + )} + + + + + + + + + + , + 
document.getElementById('app'), +) diff --git a/web/src/app/links/RotationLink.js b/web/src/app/links/RotationLink.js new file mode 100644 index 0000000000..4dc8346201 --- /dev/null +++ b/web/src/app/links/RotationLink.js @@ -0,0 +1,6 @@ +import React from 'react' +import { Link } from 'react-router-dom' + +export const RotationLink = rotation => { + return {rotation.name} +} diff --git a/web/src/app/links/ScheduleLink.js b/web/src/app/links/ScheduleLink.js new file mode 100644 index 0000000000..283679eba4 --- /dev/null +++ b/web/src/app/links/ScheduleLink.js @@ -0,0 +1,6 @@ +import React from 'react' +import { Link } from 'react-router-dom' + +export const ScheduleLink = schedule => { + return {schedule.name} +} diff --git a/web/src/app/links/ServiceLink.js b/web/src/app/links/ServiceLink.js new file mode 100644 index 0000000000..ea2bf2f346 --- /dev/null +++ b/web/src/app/links/ServiceLink.js @@ -0,0 +1,6 @@ +import React from 'react' +import { Link } from 'react-router-dom' + +export const ServiceLink = service => { + return {service.name} +} diff --git a/web/src/app/links/UserLink.js b/web/src/app/links/UserLink.js new file mode 100644 index 0000000000..88f2bca53f --- /dev/null +++ b/web/src/app/links/UserLink.js @@ -0,0 +1,6 @@ +import React from 'react' +import { Link } from 'react-router-dom' + +export const UserLink = user => { + return {user.name} +} diff --git a/web/src/app/links/index.js b/web/src/app/links/index.js new file mode 100644 index 0000000000..c427d094a3 --- /dev/null +++ b/web/src/app/links/index.js @@ -0,0 +1,4 @@ +export * from './UserLink' +export * from './ScheduleLink' +export * from './ServiceLink' +export * from './RotationLink' diff --git a/web/src/app/lists/CreateFAB.js b/web/src/app/lists/CreateFAB.js new file mode 100644 index 0000000000..93fabce479 --- /dev/null +++ b/web/src/app/lists/CreateFAB.js @@ -0,0 +1,24 @@ +import React from 'react' +import p from 'prop-types' +import AddIcon from '@material-ui/icons/Add' +import Fab from 
'@material-ui/core/Fab' + +export default class CreateFAB extends React.PureComponent { + static propTypes = { + onClick: p.func, + } + + render() { + return ( + + + + ) + } +} diff --git a/web/src/app/lists/FlatList.js b/web/src/app/lists/FlatList.js new file mode 100644 index 0000000000..e0e9d615f8 --- /dev/null +++ b/web/src/app/lists/FlatList.js @@ -0,0 +1,219 @@ +import React from 'react' +import p from 'prop-types' +import List from '@material-ui/core/List' +import ListItem from '@material-ui/core/ListItem' +import ListItemSecondaryAction from '@material-ui/core/ListItemSecondaryAction' +import ListItemText from '@material-ui/core/ListItemText' +import Typography from '@material-ui/core/Typography' +import { DragDropContext, Droppable, Draggable } from 'react-beautiful-dnd' +import withStyles from '@material-ui/core/styles/withStyles' +import ListSubheader from '@material-ui/core/ListSubheader' +import { Link } from 'react-router-dom' +import { absURLSelector } from '../selectors' +import { connect } from 'react-redux' + +const styles = theme => ({ + highlightedItem: { + borderLeft: '6px solid #93ed94', + background: '#defadf', + }, + background: { backgroundColor: 'white' }, + participantDragging: { + backgroundColor: '#ebebeb', + }, +}) + +const mapStateToProps = state => { + return { + absURL: absURLSelector(state), + } +} + +@withStyles(styles) +@connect(mapStateToProps) +export default class FlatList extends React.PureComponent { + static propTypes = { + // headerNote will be displayed at the top of the list. + headerNote: p.node, + + // emptyMessage will be displayed if there are no items in the list. 
+ emptyMessage: p.string, + + items: p.arrayOf( + p.oneOfType([ + p.shape({ + highlight: p.bool, + title: p.node.isRequired, + subText: p.node, + action: p.element, + url: p.string, + icon: p.element, + id: p.string, // required for drag and drop + }), + p.shape({ + subHeader: p.string.isRequired, + }), + ]), + ), + // If specified, enables drag and drop + // + // onReorder(id, oldIndex, newIndex) + onReorder: p.func, + } + + static defaultProps = { + items: [], + emptyMessage: 'No results', + } + + onDragStart = () => { + // adds a little vibration if the browser supports it + if (window.navigator.vibrate) { + window.navigator.vibrate(100) + } + } + + onDragEnd = result => { + this.props.onReorder( + // result.draggableId, : removed this as per new reorderList function + result.source.index, + result.destination.index, + ) + } + + renderItem(item, idx) { + let itemProps = {} + if (item.url) { + itemProps = { + component: Link, + to: this.props.absURL(item.url), + button: true, + } + } + return ( + + {item.icon} + + {item.action && ( + {item.action} + )} + + ) + } + + renderItems() { + if (!this.props.items.length) { + return ( + + + {this.props.emptyMessage} + + } + /> + + ) + } + + return this.props.items.map((item, idx) => { + if (!this.props.onReorder) { + if (item.subHeader) { + return ( + + + {item.subHeader} + + + ) + } + return this.renderItem(item, idx) + } else + return ( + + {(provided, snapshot) => { + // light grey background while dragging non-active user + const draggingBackground = snapshot.isDragging + ? this.props.classes.participantDragging + : null + return ( +
+ {this.renderItem(item, idx)} +
+ ) + }} +
+ ) + }) + } + + renderList() { + const { + absURL, + dispatch, + onReorder, + classes, + emptyMessage, + headerNote, + items, + ...otherProps + } = this.props + return ( + + {headerNote && ( + + {headerNote} + } + style={{ fontStyle: 'italic' }} + /> + + )} + {this.renderItems()} + + ) + } + + renderDragAndDrop() { + return ( + + + {(provided, _) => ( +
+ {this.renderList()} +
+ )} +
+
+ ) + } + + render() { + if (this.props.onReorder) { + // Enable drag and drop + return this.renderDragAndDrop() + } else { + return this.renderList() + } + } +} diff --git a/web/src/app/lists/PaginatedList.js b/web/src/app/lists/PaginatedList.js new file mode 100644 index 0000000000..313735bb83 --- /dev/null +++ b/web/src/app/lists/PaginatedList.js @@ -0,0 +1,336 @@ +import React from 'react' +import p from 'prop-types' +import withStyles from '@material-ui/core/styles/withStyles' +import withWidth, { isWidthUp } from '@material-ui/core/withWidth/index' + +import Avatar from '@material-ui/core/Avatar' +import FavoriteIcon from '@material-ui/icons/Star' +import Card from '@material-ui/core/Card' +import CircularProgress from '@material-ui/core/CircularProgress' +import Grid from '@material-ui/core/Grid' +import IconButton from '@material-ui/core/IconButton' +import List from '@material-ui/core/List' +import ListItem from '@material-ui/core/ListItem' +import ListItemText from '@material-ui/core/ListItemText' +import Typography from '@material-ui/core/Typography' +import ListItemSecondaryAction from '@material-ui/core/ListItemSecondaryAction' + +import LeftIcon from '@material-ui/icons/ChevronLeft' +import RightIcon from '@material-ui/icons/ChevronRight' +import { Link } from 'react-router-dom' +import { connect } from 'react-redux' + +import { ITEMS_PER_PAGE } from '../config' +import { absURLSelector } from '../selectors/url' + +// gray boxes on load +// disable overflow +// can go to last page + one if loading & hasNextPage +// delete on details -> update list (cache, refetch?) 
+// - on details, don't have accesses to search param + +const styles = theme => ({ + progress: { + color: theme.palette.secondary['500'], + position: 'absolute', + }, + favoriteIcon: { + backgroundColor: 'transparent', + color: 'grey', + }, + headerNote: { + fontStyle: 'italic', + }, + controls: { + [theme.breakpoints.down('sm')]: { + '&:not(:first-child)': { + marginBottom: '4.5em', + }, + }, + }, +}) + +@withStyles(styles) +class PaginationControls extends React.PureComponent { + static propTypes = { + isLoading: p.bool, + onNext: p.func, + onBack: p.func, + } + + render() { + const { classes, isLoading, onBack, onNext } = this.props + + return ( + + + + { + onBack() + window.scrollTo(0, 0) + }} + > + + + + + { + onNext() + window.scrollTo(0, 0) + }} + > + {isLoading && !onNext && ( + + )} + + + + + + ) + } +} + +const loadingStyle = { + color: 'lightgrey', + background: 'lightgrey', + height: '10.3333px', +} + +// LoadingItem is used as a placeholder for loading content +class LoadingItem extends React.PureComponent { + render() { + const { dense } = this.props + let minHeight = 71 + if (dense) { + minHeight = 57 + } + return ( + + + + + + ) + } +} + +const mapStateToProps = state => { + return { + absURL: absURLSelector(state), + } +} + +@withWidth() +@withStyles(styles) +@connect(mapStateToProps) +export class PaginatedList extends React.PureComponent { + static propTypes = { + // headerNote will be displayed at the top of the list. 
+ headerNote: p.oneOfType([p.string, p.element]), + + items: p.arrayOf( + p.shape({ + url: p.string, + title: p.string.isRequired, + subText: p.string, + isFavorite: p.bool, + icon: p.element, + action: p.element, + }), + ), + + isLoading: p.bool, + loadMore: p.func, + + // disable placeholder display during loading + noPlaceholder: p.bool, + + // provide a message to display if there are no results + emptyMessage: p.string, + } + + static defaultProps = { + emptyMessage: 'No results', + } + + state = { + page: 0, + } + + pageCount = () => Math.ceil((this.props.items || []).length / ITEMS_PER_PAGE) + + // isLoading returns true if the parent says we are, or + // we are currently on an incomplete page and `loadMore` is available. + isLoading() { + if (this.props.isLoading) return true + + // We are on a future/incomplete page and loadMore is true + const itemCount = (this.props.items || []).length + if ( + (this.state.page + 1) * ITEMS_PER_PAGE > itemCount && + this.props.loadMore + ) + return true + + return false + } + + hasNextPage() { + const nextPage = this.state.page + 1 + // Check that we have at least 1 item already for the next page + if (nextPage < this.pageCount()) return true + + // If we're on the last page, not already loading, and can load more + if ( + nextPage === this.pageCount() && + !this.isLoading() && + this.props.loadMore + ) { + return true + } + + return false + } + + onNextPage = () => { + const nextPage = this.state.page + 1 + this.setState({ page: nextPage }) + + // If we're on a not-fully-loaded page, or the last page when > the first page + if ( + (nextPage >= this.pageCount() || + (nextPage > 1 && nextPage + 1 === this.pageCount())) && + this.props.loadMore + ) + this.props.loadMore(ITEMS_PER_PAGE * 2) + } + + renderPaginationControls() { + let onBack = null + let onNext = null + + if (this.state.page > 0) + onBack = () => this.setState({ page: this.state.page - 1 }) + if (this.hasNextPage()) onNext = this.onNextPage + + return ( + + ) 
+ } + + renderNoResults() { + return ( + + {this.props.emptyMessage} + } + /> + + ) + } + + renderItem = (item, idx) => { + const { classes, width, absURL } = this.props + + let favIcon = null + if (item.isFavorite) { + favIcon = ( + + + + + + ) + } + return ( + + {item.icon} + + {favIcon} + {item.action && ( + {item.action} + )} + + ) + } + + renderListItems() { + if (this.pageCount() === 0 && !this.props.isLoading) + return this.renderNoResults() + + const { page } = this.state + const { width, noPlaceholder } = this.props + + const items = (this.props.items || []) + .slice(page * ITEMS_PER_PAGE, (page + 1) * ITEMS_PER_PAGE) + .map(this.renderItem) + + // Display full list when loading + if (!noPlaceholder) { + while (this.isLoading() && items.length < ITEMS_PER_PAGE) { + items.push( + , + ) + } + } + + return items + } + + render() { + const { headerNote, classes } = this.props + return ( + + + + + {headerNote && ( + + + {headerNote} + + } + /> + + )} + {this.renderListItems()} + + + + {this.renderPaginationControls()} + + ) + } +} diff --git a/web/src/app/lists/QueryList.js b/web/src/app/lists/QueryList.js new file mode 100644 index 0000000000..bc9d93ccec --- /dev/null +++ b/web/src/app/lists/QueryList.js @@ -0,0 +1,131 @@ +import React from 'react' +import p from 'prop-types' + +import { PaginatedList } from './PaginatedList' +import { ITEMS_PER_PAGE } from '../config' +import { once } from 'lodash-es' +import { connect } from 'react-redux' +import { searchSelector } from '../selectors' +import Query from '../util/Query' +import { fieldAlias } from '../util/graphql' + +const mapStateToProps = state => ({ + search: searchSelector(state), +}) + +@connect(mapStateToProps) +export default class QueryList extends React.PureComponent { + static propTypes = { + // query must provide a single field that returns nodes + // + // For example: + // ```graphql + // query Services { + // services { + // nodes { + // id + // name + // description + // } + // } + // } + // 
``` + query: p.object.isRequired, + + // mapDataNode should map the struct from each node in `nodes` to the struct required by a PaginatedList item. + mapDataNode: p.func, + + // variables will be added to the initial query. Useful for things like `favoritesFirst` or alert filters. + // Note: The `input.search` and `input.first` parameters are included by default, but can be overridden. + variables: p.object, + + // If set, the search string param is ignored. + noSearch: p.bool, + + // provided by redux + search: p.string, + } + + static defaultProps = { + mapDataNode: n => ({ title: n.name, url: n.id, subText: n.description }), + variables: {}, + } + + buildFetchMore = (fetchMore, after) => { + return once(num => + fetchMore({ + variables: { + input: { + first: num, + after, + }, + }, + updateQuery: (prev, { fetchMoreResult }) => { + if (!fetchMoreResult) return prev + + return { + ...fetchMoreResult, + data: { + ...fetchMoreResult.data, + nodes: prev.data.nodes.concat(fetchMoreResult.data.nodes), + }, + } + }, + }), + ) + } + + renderContent = ({ data, loading, fetchMore }) => { + let items = [] + let loadMore + const { + query, + mapDataNode, + variables, + noSearch, + search, + ...listProps + } = this.props + + if (data && data.data && data.data.nodes) { + items = data.data.nodes.map(this.props.mapDataNode) + if (data.data.pageInfo.hasNextPage) { + loadMore = this.buildFetchMore(fetchMore, data.data.pageInfo.endCursor) + } + } + return ( + + ) + } + + render() { + const { input, ...vars } = this.props.variables + + const variables = { + ...vars, + input: { + first: ITEMS_PER_PAGE, + search: this.props.search, + ...input, + }, + } + if (this.props.noSearch) { + delete variables.input.search + } + return ( + + ) + } +} diff --git a/web/src/app/lists/SimpleListPage.js b/web/src/app/lists/SimpleListPage.js new file mode 100644 index 0000000000..a7692556ba --- /dev/null +++ b/web/src/app/lists/SimpleListPage.js @@ -0,0 +1,35 @@ +import React from 'react' +import 
QueryList from './QueryList' + +import PageActions from '../util/PageActions' + +import Search from '../util/Search' +import CreateFAB from './CreateFAB' + +export default class SimpleListPage extends React.PureComponent { + state = { + create: false, + } + + render() { + const { createForm, ...queryProps } = this.props + return ( + + + + + + + + {createForm && ( + this.setState({ create: true })} /> + )} + + {this.state.create && + React.cloneElement(createForm, { + onClose: () => this.setState({ create: false }), + })} + + ) + } +} diff --git a/web/src/app/lists/components/BaseActionsMenu.js b/web/src/app/lists/components/BaseActionsMenu.js new file mode 100644 index 0000000000..c0749b6779 --- /dev/null +++ b/web/src/app/lists/components/BaseActionsMenu.js @@ -0,0 +1,75 @@ +import React, { Component } from 'react' +import Button from '@material-ui/core/Button' +import Menu from '@material-ui/core/Menu' +import MenuItem from '@material-ui/core/MenuItem' +import { ExpandMore } from '@material-ui/icons' + +/* + * Takes an options object array to render a dropdown menu. + * + * Each option should contain a label, item action, and optionally + * a flag disabling the item or not. + * + * E.g. + * [{ + * label: 'Menu option label', + * disabled: this.state.disabled, + * action: () => someAction(foo, bar) + * }] + */ +export class BaseActionsMenu extends Component { + constructor(props) { + super(props) + + this.state = { + anchorEl: null, + open: false, + } + } + + handleClick = event => { + this.setState({ open: true, anchorEl: event.currentTarget }) + } + + render() { + // Forces actions menu to the right of the table (should always be the last table cell in a row) + const style = { + float: 'right', + } + + return ( +
+ + { + if (this._fn) { + this._fn() + this._fn = null + } + }} + onClose={() => this.setState({ open: false })} + > + {this.props.options.map(item => { + return ( + { + this.setState({ open: false }) + this._fn = item.action + }} + > + {item.label} + + ) + })} + +
+ ) + } +} diff --git a/web/src/app/lists/index.js b/web/src/app/lists/index.js new file mode 100644 index 0000000000..e94d6f1625 --- /dev/null +++ b/web/src/app/lists/index.js @@ -0,0 +1 @@ +export * from './components/BaseActionsMenu' diff --git a/web/src/app/loading/components/LoadingButton.js b/web/src/app/loading/components/LoadingButton.js new file mode 100644 index 0000000000..0a45b8054e --- /dev/null +++ b/web/src/app/loading/components/LoadingButton.js @@ -0,0 +1,54 @@ +import React, { Component } from 'react' +import p from 'prop-types' +import Button from '@material-ui/core/Button' +import CircularProgress from '@material-ui/core/CircularProgress' + +export default class LoadingButton extends Component { + static propTypes = { + attemptCount: p.number, + buttonText: p.string, + color: p.string, + disabled: p.bool, + loading: p.bool, + onClick: p.func, + } + + render() { + const { + attemptCount, + buttonText, + color, + disabled, + loading, + onClick, + style, + } = this.props + + return ( +
+ + {loading && ( + + )} +
+ ) + } +} diff --git a/web/src/app/loading/components/Spinner.js b/web/src/app/loading/components/Spinner.js new file mode 100644 index 0000000000..8f51e399d6 --- /dev/null +++ b/web/src/app/loading/components/Spinner.js @@ -0,0 +1,58 @@ +import React from 'react' +import p from 'prop-types' +import CircularProgress from '@material-ui/core/CircularProgress' + +import { DEFAULT_SPIN_DELAY_MS, DEFAULT_SPIN_WAIT_MS } from '../../config' + +/* + * Show a loading spinner in the center of the container. + */ +export default class Spinner extends React.PureComponent { + static propTypes = { + // Wait `delayMs` milliseconds before rendering a spinner. + delayMs: p.number, + + // Wait `waitMs` before calling onReady. + waitMs: p.number, + + // onSpin is called when the spinner starts spinning. + onSpin: p.func, + + // onReady is called once the spinner has spun for `waitMs`. + onReady: p.func, + } + + static defaultProps = { + delayMs: DEFAULT_SPIN_DELAY_MS, + waitMs: DEFAULT_SPIN_WAIT_MS, + } + + state = { + spin: false, + } + + componentDidMount() { + this._spin = setTimeout(() => { + this._spin = null + this.setState({ spin: true }) + if (this.props.onSpin) this.props.onSpin() + + if (this.props.waitMs && this.props.onReady) { + this._spin = setTimeout(this.props.onReady, this.props.waitMs) + } + }, this.props.delayMs) + } + componentWillUnmount() { + clearTimeout(this._spin) + } + + render() { + if (this.props.delayMs && !this.state.spin) return null + + return ( +
+ +
+ ) + } +} diff --git a/web/src/app/main/ErrorBoundary.js b/web/src/app/main/ErrorBoundary.js new file mode 100644 index 0000000000..88ef3facaf --- /dev/null +++ b/web/src/app/main/ErrorBoundary.js @@ -0,0 +1,20 @@ +import React from 'react' +import { GenericError } from '../error-pages' + +export default class ErrorBoundary extends React.PureComponent { + state = { hasError: false } + + componentDidCatch(error, info) { + // Display fallback UI + this.setState({ hasError: true }) + console.error(error, info) + // TODO: log and/or call some API + } + + render() { + if (this.state.hasError) { + return + } + return this.props.children + } +} diff --git a/web/src/app/main/MobileSideBar.js b/web/src/app/main/MobileSideBar.js new file mode 100644 index 0000000000..e70881aa06 --- /dev/null +++ b/web/src/app/main/MobileSideBar.js @@ -0,0 +1,33 @@ +import React from 'react' +import { PropTypes as p } from 'prop-types' +import SwipeableDrawer from '@material-ui/core/SwipeableDrawer' + +export default class MobileSideBar extends React.PureComponent { + static propTypes = { + show: p.bool.isRequired, + onChange: p.func.isRequired, + } + + render() { + // disable "discover" swiping open on iOS as it has it defaulted to going back a page + const iOS = process.browser && /iPad|iPhone|iPod/.test(navigator.userAgent) + + return ( + this.props.onChange(true)} + onClose={() => this.props.onChange(false)} + > +
this.props.onChange(false)} + onKeyDown={() => this.props.onChange(false)} + > + {this.props.children} +
+
+ ) + } +} diff --git a/web/src/app/main/NewApp.js b/web/src/app/main/NewApp.js new file mode 100644 index 0000000000..34738a14a4 --- /dev/null +++ b/web/src/app/main/NewApp.js @@ -0,0 +1,137 @@ +import React from 'react' +import AppBar from '@material-ui/core/AppBar' +import Hidden from '@material-ui/core/Hidden' +import Toolbar from '@material-ui/core/Toolbar' +import withStyles from '@material-ui/core/styles/withStyles' +import isFullScreen from '@material-ui/core/withMobileDialog' +import ToolbarTitle from './components/ToolbarTitle' +import ToolbarAction from './components/ToolbarAction' +import ErrorBoundary from './ErrorBoundary' +import routeConfig, { renderRoutes } from './routes' +import { Switch, Route } from 'react-router-dom' +import Grid from '@material-ui/core/Grid' +import { connect } from 'react-redux' + +import { PageActionContainer, PageActionProvider } from '../util/PageActions' +import { PageNotFound as LazyPageNotFound } from '../error-pages/Errors' +import LazySideBarDrawerList from './components/SideBarDrawerList' +import LazyMobileSideBar from './MobileSideBar' +import LazyWideSideBar from './WideSideBar' +import LazyNewUserSetup from './components/NewUserSetup' +import Login from './components/Login' +import URLErrorDialog from './URLErrorDialog' + +const drawerWidth = '10.5em' + +const styles = theme => ({ + root: { + flexGrow: 1, + zIndex: 1, + position: 'relative', + display: 'flex', + backgroundColor: 'lightgrey', + height: '100%', + }, + main: { + width: '100%', + }, + appBar: { + zIndex: theme.zIndex.drawer + 1, + }, + icon: { + marginRight: '0.25em', + color: theme.palette.primary['500'], + }, + toolbar: theme.mixins.toolbar, + containerClass: { + paddingTop: '1em', + [theme.breakpoints.up('md')]: { width: '75%' }, + [theme.breakpoints.down('sm')]: { width: '100%' }, + }, +}) + +const mapStateToProps = state => { + return { + authValid: state.auth.valid, + path: state.router.location.pathname, + } +} + +@withStyles(styles, { 
withTheme: true }) +@isFullScreen() +@connect(mapStateToProps) +export default class App extends React.PureComponent { + state = { + showMobile: false, + } + + render() { + if (!this.props.authValid) { + return + } + const { classes, fullScreen } = this.props + const marginLeft = fullScreen ? 0 : drawerWidth + + let cyFormat = 'wide' + if (fullScreen) cyFormat = 'mobile' + return ( +
+ + + + + this.setState({ showMobile: true }) + } + /> + + + + + + + + +
+ this.setState({ showWizard: true })} + /> + + + + this.setState({ showMobile })} + > + this.setState({ showWizard: true })} + /> + + + + + +
+
+ + + + + + {renderRoutes(routeConfig)} + } /> + + + + +
+ +
+ ) + } +} diff --git a/web/src/app/main/URLErrorDialog.js b/web/src/app/main/URLErrorDialog.js new file mode 100644 index 0000000000..7096308733 --- /dev/null +++ b/web/src/app/main/URLErrorDialog.js @@ -0,0 +1,41 @@ +import React from 'react' +import { urlParamSelector } from '../selectors' +import { connect } from 'react-redux' +import FormDialog from '../dialogs/FormDialog' +import { resetURLParams } from '../actions' + +@connect( + state => ({ + errorMessage: urlParamSelector(state)('errorMessage'), + errorTitle: urlParamSelector(state)('errorTitle'), + }), + dispatch => ({ + resetError: () => dispatch(resetURLParams('errorMessage', 'errorTitle')), + }), +) +export default class URLErrorDialog extends React.Component { + onClose = () => { + this.props.resetError() + } + + render() { + const { errorMessage, errorTitle } = this.props + const open = Boolean(errorMessage) || Boolean(errorTitle) + + return ( + open && ( + + ) + ) + } +} diff --git a/web/src/app/main/WideSideBar.js b/web/src/app/main/WideSideBar.js new file mode 100644 index 0000000000..ac3f9fea5f --- /dev/null +++ b/web/src/app/main/WideSideBar.js @@ -0,0 +1,36 @@ +import React from 'react' +import withStyles from '@material-ui/core/styles/withStyles' +import Drawer from '@material-ui/core/Drawer' + +const drawerWidth = '10.5em' +const styles = theme => ({ + sidebarPaper: { + width: drawerWidth, + position: 'fixed', + transition: theme.transitions.create('width', { + easing: theme.transitions.easing.sharp, + duration: theme.transitions.duration.enteringScreen, + }), + }, +}) + +@withStyles(styles) +export default class WideSideBar extends React.PureComponent { + state = { + show: false, + } + + render() { + const { classes } = this.props + return ( + + {this.props.children} + + ) + } +} diff --git a/web/src/app/main/components/Login.js b/web/src/app/main/components/Login.js new file mode 100644 index 0000000000..beb0fc4df1 --- /dev/null +++ b/web/src/app/main/components/Login.js @@ -0,0 +1,273 @@ 
+import React, { Component } from 'react' +import axios from 'axios' +import Button from '@material-ui/core/Button' +import Grid from '@material-ui/core/Grid' +import TextField from '@material-ui/core/TextField' +import Typography from '@material-ui/core/Typography' +import Card from '@material-ui/core/Card' +import CardContent from '@material-ui/core/CardContent' +import Divider from '@material-ui/core/Divider' +import Hidden from '@material-ui/core/Hidden' +import isFullScreen from '@material-ui/core/withMobileDialog' +import { withStyles } from '@material-ui/core/styles' +import { getParameterByName } from '../../util/query_param' + +const PROVIDERS = '/api/v2/identity/providers' +const BACKGROUND_URL = + 'https://www.toptal.com/designers/subtlepatterns/patterns/dust_scratches.png' + +const styles = { + card: { + width: 'fit-content', + maxWidth: '30em', + }, + center: { + position: 'fixed', + top: '40%', + left: '50%', + WebkitTransform: 'translate(-50%, -40%)', + transform: 'translateY(-50%, -40%)', + textAlign: 'center', + }, + divider: { + width: '9em', + }, + error: { + color: 'red', + }, + footer: { + paddingBottom: '0.5em', + }, + gridContainer: { + width: 'fit-content', + }, + hasNext: { + display: 'flex', + alignItems: 'center', + justifyContent: 'center', + }, + loginIcon: { + height: '1.5em', + width: '1.5em', + paddingRight: '0.5em', + }, + or: { + paddingLeft: '1em', + paddingRight: '1em', + }, +} + +@withStyles(styles) +@isFullScreen() +export default class Login extends Component { + constructor(props) { + super(props) + + this.state = { + error: getParameterByName('login_error') || '', + providers: [], + } + } + + componentWillReceiveProps(next) { + if (this.props.fullScreen === next.fullScreen) { + return + } + + this.setBackground(next.fullScreen) + } + + componentDidMount() { + this.setBackground(this.props.fullScreen) + + // get providers + axios + .get(PROVIDERS) + .then(res => this.setState({ providers: res.data })) + .catch(err => 
this.setState({ error: err })) + } + + /* + * Sets the background image for the login page + * + * Background pattern from Toptal Subtle Patterns + */ + setBackground = fullScreen => { + if (fullScreen) { + document.body.style.backgroundColor = `white` // overrides light grey background + } else { + document.body.style.backgroundImage = `url('${BACKGROUND_URL}')` // overrides light grey background + } + } + + /* + * Renders a field from a provider + */ + renderField = field => { + const { + ID: id, // unique name/identifier of the field + Label: label, // placeholder text that is displayed to the use in the field + Password: password, // indicates that a field should be treated as a password + Required: required, // indicates that a field must not be empty + // Scannable: scannable todo: indicates that the field can be entered via QR-code scan + } = field + + return ( + + + + ) + } + + /* + * Renders a divider if there is another provider after + */ + renderHasNextDivider = (idx, len) => { + const { classes } = this.props + + if (idx + 1 < len) { + return ( + + + or + + + ) + } + } + + /* + * Renders a provider given from initial GET request + */ + renderProvider = (provider, idx, len) => { + const { classes } = this.props + const { + ID: id, // unique identifier of the provider + Fields: fields, // holds a list of fields to include with the request + Hidden: hidden, // indicates that the provider is not intended for user visibility + LogoUrl: logoUrl, // optional URL of an icon to display with the provider + Title: title, // user-viable string for identifying this provider + URL: url, // the location of the form action (POST) + } = provider + + if (hidden) return + + // create login button + let loginButton = null + const loginIcon = logoUrl ? 
( + + ) : null + if (fields) { + loginButton = ( + + ) + } else { + loginButton = ( + + ) + } + + let form = null + if (fields && fields.length) { + form = ( + + {fields.map(field => this.renderField(field))} + + {loginButton} + + + ) + } else { + form = loginButton + } + + return [ + +
+ {form} +
+
, + this.renderHasNextDivider(idx, len), + ] + } + + render() { + const { classes } = this.props + const { error, providers } = this.state + + // error message if GET fails + let errorJSX = null + if (error) { + errorJSX = ( + + + {error.toString()} + + + ) + } + + const logo = ( + + + GoAlert + + ) + + return ( + + +
+ + + + + {logo} + + {providers.map((provider, idx) => + this.renderProvider(provider, idx, providers.length), + )} + {errorJSX} + + + +
+
+ +
+ + + {logo} + + {providers.map((provider, idx) => + this.renderProvider(provider, idx, providers.length), + )} + {errorJSX} + +
+
+
+ ) + } +} diff --git a/web/src/app/main/components/NewUserSetup.js b/web/src/app/main/components/NewUserSetup.js new file mode 100644 index 0000000000..b32a616e87 --- /dev/null +++ b/web/src/app/main/components/NewUserSetup.js @@ -0,0 +1,78 @@ +import React, { Component } from 'react' +import p from 'prop-types' +import ContactMethodForm from '../../contact-methods/components/ContactMethodForm' +import { clearParameter } from '../../util/query_param' +import { graphql } from 'react-apollo' +import gql from 'graphql-tag' +import { connect } from 'react-redux' +import { bindActionCreators } from 'redux' +import { setShowNewUserForm } from '../../actions' + +const ID_QUERY = gql` + query GetCurrentUserID { + currentUser { + id + contact_methods { + id + } + notification_rules { + id + } + } + } +` + +const mapStateToProps = state => ({ + isFirstLogin: state.main.isFirstLogin, +}) + +const mapDispatchToProps = dispatch => + bindActionCreators( + { + setShowNewUserForm, + }, + dispatch, + ) + +@graphql(ID_QUERY) +@connect( + mapStateToProps, + mapDispatchToProps, +) +export default class NewUserSetup extends Component { + static contextTypes = { + router: p.object, + isFirstLogin: p.bool, + setShowNewUserForm: p.func, + } + + /* + * Don't show the new user setup dialog if the user keeps refreshing with + * the original query param still active + */ + onNewUserDialogClose = (successful, clickaway) => { + const newUrl = clearParameter('isFirstLogin') + this.context.router.history.replace(window.location.pathname + newUrl) + if (clickaway) return + this.props.setShowNewUserForm() + } + + render() { + const { data, isFirstLogin } = this.props + const userID = data && data.currentUser && data.currentUser.id + if (!userID) { + return null + } + + return ( + + ) + } +} diff --git a/web/src/app/main/components/SideBarDrawerList.js b/web/src/app/main/components/SideBarDrawerList.js new file mode 100644 index 0000000000..3d879e4efa --- /dev/null +++ 
b/web/src/app/main/components/SideBarDrawerList.js @@ -0,0 +1,209 @@ +import React from 'react' +import { PropTypes as p } from 'prop-types' +import Divider from '@material-ui/core/Divider' +import List from '@material-ui/core/List' +import ListItem from '@material-ui/core/ListItem' +import ListItemText from '@material-ui/core/ListItemText' +import Typography from '@material-ui/core/Typography' +import withStyles from '@material-ui/core/styles/withStyles' +import { styles as globalStyles } from '../../styles/materialStyles' +import { + Build as WizardIcon, + Feedback as FeedbackIcon, + Group as UsersIcon, + Layers as EscalationPoliciesIcon, + Notifications as AlertsIcon, + PowerSettingsNew as LogoutIcon, + RotateRight as RotationsIcon, + Today as SchedulesIcon, + VpnKey as ServicesIcon, + Settings as AdminIcon, +} from '@material-ui/icons' + +import routeConfig, { getPath } from '../routes' + +import { Link, NavLink } from 'react-router-dom' +import ListItemIcon from '@material-ui/core/ListItemIcon' +import { CurrentUserAvatar } from '../../util/avatar' +import { authLogout } from '../../actions' +import { connect } from 'react-redux' +import RequireConfig, { Config } from '../../util/RequireConfig' + +const navIcons = { + Alerts: AlertsIcon, + Rotations: RotationsIcon, + Schedules: SchedulesIcon, + 'Escalation Policies': EscalationPoliciesIcon, + Services: ServicesIcon, + Users: UsersIcon, + Admin: AdminIcon, +} + +const styles = theme => ({ + ...globalStyles(theme), + logoDiv: { + width: '100%', + display: 'flex', + justifyContent: 'center', + }, + logo: { + padding: '0.5em', + }, + navSelected: { + backgroundColor: '#ebebeb', + borderRight: '3px solid ' + theme.palette.primary['500'], + }, + navIcon: { + width: '1em', + height: '1em', + fontSize: '24px', + }, + list: { + color: theme.palette.primary['500'], + padding: 0, + }, + listItemText: { + color: theme.palette.primary['500'], + }, +}) + +const mapDispatchToProps = dispatch => { + return { + logout: () => 
dispatch(authLogout(true)), + } +} + +@withStyles(styles, { withTheme: true }) +@connect( + null, + mapDispatchToProps, +) +export default class SideBarDrawerList extends React.PureComponent { + static propTypes = { + onWizard: p.func.isRequired, + classes: p.object.isRequired, + } + + renderSidebarLink = (icon, path, label, props = {}) => { + return ( + + {this.renderSidebarItem(icon, label)} + + ) + } + + renderSidebarNavLink = (icon, path, label, key) => { + return ( + + {this.renderSidebarItem(icon, label)} + + ) + } + + renderSidebarItem = (IconComponent, label) => { + return ( + + + + + + {label} + + } + /> + + ) + } + + renderAdmin() { + const cfg = routeConfig.find(c => c.title === 'Admin') + + return this.renderSidebarNavLink( + navIcons[cfg.title], + getPath(cfg), + cfg.title, + null, + ) + } + + renderFeedback(url) { + return ( + + {this.renderSidebarItem(FeedbackIcon, 'Feedback')} + + ) + } + + render() { + const { classes } = this.props + + return ( + +
+ +
+ + + {routeConfig + .filter(cfg => cfg.nav !== false) + .map((cfg, idx) => + this.renderSidebarNavLink( + navIcons[cfg.title], + getPath(cfg), + cfg.title, + idx, + ), + )} + + + + {this.renderAdmin()} + + + + {this.renderSidebarNavLink(WizardIcon, '/wizard', 'Wizard')} + + {cfg => + cfg['Feedback.Enable'] && + this.renderFeedback( + cfg['Feedback.OverrideURL'] || + 'https://www.surveygizmo.com/s3/4106900/GoAlert-Feedback', + ) + } + + {this.renderSidebarLink( + LogoutIcon, + '/api/v2/identity/logout', + 'Logout', + { + onClick: e => { + e.preventDefault() + this.props.logout() + }, + }, + )} + {this.renderSidebarNavLink(CurrentUserAvatar, '/profile', 'Profile')} +
+ ) + } +} diff --git a/web/src/app/main/components/ToolbarAction.js b/web/src/app/main/components/ToolbarAction.js new file mode 100644 index 0000000000..07b1ff6587 --- /dev/null +++ b/web/src/app/main/components/ToolbarAction.js @@ -0,0 +1,71 @@ +import React, { Component } from 'react' +import p from 'prop-types' +import { Switch, Route } from 'react-router-dom' +import Hidden from '@material-ui/core/Hidden' +import IconButton from '@material-ui/core/IconButton' +import { Menu as MenuIcon, ChevronLeft } from '@material-ui/icons' +import withWidth, { isWidthUp } from '@material-ui/core/withWidth' + +@withWidth() +export default class ToolbarAction extends Component { + static contextTypes = { + router: p.object, + } + + removeLastPartOfPath = path => { + let parts = path.split('/') + parts.pop() + return parts.join('/') + } + + renderToolbarAction = () => { + const route = this.removeLastPartOfPath(window.location.pathname) + + // only show back button on mobile + if (isWidthUp('md', this.props.width)) return null + + return ( + this.context.router.history.replace(route)} + > + + + ) + } + + render() { + const getRoute = route => ( + this.renderToolbarAction()} /> + ) + + return ( + + {getRoute('/schedules/:scheduleID/assignments')} + {getRoute('/schedules/:scheduleID/escalation-policies')} + {getRoute('/schedules/:scheduleID/overrides')} + {getRoute('/schedules/:scheduleID/shifts')} + {getRoute('/escalation-policies/:escalationPolicyID/services')} + {getRoute('/services/:serviceID/alerts')} + {getRoute('/services/:serviceID/integration-keys')} + {getRoute('/services/:serviceID/labels')} + ( + + this.props.handleShowMobileSidebar(true)} + > + + + + )} + /> + + ) + } +} diff --git a/web/src/app/main/components/ToolbarTitle.js b/web/src/app/main/components/ToolbarTitle.js new file mode 100644 index 0000000000..eb55d426e6 --- /dev/null +++ b/web/src/app/main/components/ToolbarTitle.js @@ -0,0 +1,205 @@ +import React from 'react' +import p from 'prop-types' +import 
Typography from '@material-ui/core/Typography' +import { Switch, Route, Link } from 'react-router-dom' +import withWidth, { isWidthUp } from '@material-ui/core/withWidth' +import { ChevronRight } from '@material-ui/icons' +import withStyles from '@material-ui/core/styles/withStyles' +import gql from 'graphql-tag' +import { Query } from 'react-apollo' +import { startCase } from 'lodash-es' +import { graphql2Client } from '../../apollo' +import { connect } from 'react-redux' +import { absURLSelector } from '../../selectors/url' + +const styles = { + backPage: { + '&:hover': { + cursor: 'pointer', + backgroundColor: 'rgba(255, 255, 255, 0.2)', + borderRadius: '6px', + padding: '4px', + textDecoration: 'none', + }, + padding: '0 4px 0 4px', + }, + div: { + alignItems: 'center', + display: 'flex', + height: '100%', + width: '100%', + }, + title: { + padding: '0 4px 0 4px', + flex: 1, // pushes toolbar actions to the right + }, +} + +const mapSingular = { + Schedules: 'Schedule', + 'Escalation Policies': 'Escalation Policy', + Rotations: 'Rotation', + Users: 'User', + Services: 'Service', +} + +const nameQuery = typeName => gql` + query($id: ID!) 
{ + data: ${typeName}(id: $id) { + id + name + } + } +` + +class NameLoader extends React.PureComponent { + static propTypes = { + fallback: p.string.isRequired, + id: p.string, + query: p.object, + } + + render() { + if (!this.props.query || !this.props.id) return this.props.fallback + return ( + + {({ data }) => { + if (!data || !data.data) { + return this.props.fallback + } + + return data.data.name + }} + + ) + } +} + +const mapStateToProps = state => { + return { + absURL: absURLSelector(state), + } +} + +@withWidth() +@withStyles(styles) +@connect(mapStateToProps) +export default class ToolbarTitle extends React.Component { + renderTitle = title => { + return ( + + {title.replace('On Call', 'On-Call')} + + ) + } + + renderSubPageTitle = ({ match }) => { + const sub = startCase(match.params.sub) + + if (!isWidthUp('md', this.props.width)) { + // mobile, only render current title + return this.renderTitle(sub) + } + + let query + switch (match.params.type) { + case 'users': + query = nameQuery('user') + break + case 'services': + query = nameQuery('service') + break + case 'schedules': + query = nameQuery('schedule') + break + case 'escalation-policies': + query = nameQuery('escalationPolicy') + break + } + + return ( +
+ + + + + {this.renderTitle(sub)} +
+ ) + } + + detailsText = match => { + const typeName = startCase(match.params.type) + return ( + (mapSingular[typeName] || typeName) + + (match.params.type !== 'profile' ? ' Details' : '') + ) + } + + renderDetailsPageTitle = ({ match }) => { + return this.renderTitle(this.detailsText(match)) + } + + renderTopLevelTitle = ({ match }) => { + return this.renderTitle(startCase(match.params.type)) + } + + render() { + return ( + + + + + + + + + this.renderTitle('Setup Wizard')} /> + this.renderTitle('Admin Page')} /> + this.renderTitle('Documentation')} /> + + ) + } +} diff --git a/web/src/app/main/routes.js b/web/src/app/main/routes.js new file mode 100644 index 0000000000..102a3ff879 --- /dev/null +++ b/web/src/app/main/routes.js @@ -0,0 +1,115 @@ +import React from 'react' +import { Redirect, Route } from 'react-router-dom' +import joinURL from '../util/joinURL' +import RotationRouter from '../rotations/RotationRouter' +import AlertRouter from '../alerts/AlertRouter' +import ScheduleRouter from '../schedules/ScheduleRouter' +import PolicyRouter from '../escalation-policies/PolicyRouter' +import ServiceRouter from '../services/ServiceRouter' +import UserRouter from '../users/UserRouter' +import AdminRouter from '../admin/AdminRouter' +import WizardRouter from '../wizard/WizardRouter' + +export const getPath = p => (Array.isArray(p.path) ? p.path[0] : p.path) + +export function renderRoutes(routeConfig = []) { + const routes = [] + + routeConfig.forEach((cfg, idx) => { + const _path = cfg.path + const path = Array.isArray(_path) ? _path[0] : _path + + // redirect to remove trailing slashes + routes.push( + , + ) + + if (Array.isArray(_path)) { + // add alias routes (for compatibility) + _path.slice(1).forEach((p, pIdx) => { + routes.push( + , + ) + if (p !== '/') { + // redirect nested paths (e.g. 
/on_call_schedules/foo to /schedules/foo) + routes.push( + , + ) + } + }) + } + + routes.push( + , + ) + }) + + return routes +} + +// used by new app and the toolbar title +export default [ + { + title: 'Alerts', + path: ['/alerts', '/'], + component: AlertRouter, + }, + { + title: 'Rotations', + path: '/rotations', + component: RotationRouter, + }, + { + title: 'Schedules', + path: ['/schedules', '/on_call_schedules'], + component: ScheduleRouter, + }, + { + title: 'Escalation Policies', + path: ['/escalation-policies', '/escalation_policies'], + component: PolicyRouter, + }, + { + title: 'Services', + path: '/services', + component: ServiceRouter, + }, + { + title: 'Users', + path: '/users', + component: UserRouter, + }, + { + nav: false, + title: 'Setup Wizard', + path: '/wizard', + component: WizardRouter, + }, + { + nav: false, + title: 'Profile', + path: '/profile', + component: UserRouter, + }, + { + nav: false, + title: 'Admin', + path: '/admin', + component: AdminRouter, + }, +] diff --git a/web/src/app/mui-pickers.js b/web/src/app/mui-pickers.js new file mode 100644 index 0000000000..9254335620 --- /dev/null +++ b/web/src/app/mui-pickers.js @@ -0,0 +1,11 @@ +import React from 'react' +import { MuiPickersUtilsProvider } from 'material-ui-pickers' +import PickerUtils from '@date-io/luxon' + +export default function PickersUtilsProvider(props) { + return ( + + {props.children} + + ) +} diff --git a/web/src/app/mui.js b/web/src/app/mui.js new file mode 100644 index 0000000000..ab2e781e6c --- /dev/null +++ b/web/src/app/mui.js @@ -0,0 +1,29 @@ +import createMuiTheme from '@material-ui/core/styles/createMuiTheme' +import grey from '@material-ui/core/colors/grey' +import red from '@material-ui/core/colors/red' + +let testOverrides = {} +if (global.Cypress) { + testOverrides = { + transitions: { + // So we have `transition: none;` everywhere + create: () => 'none', + }, + } +} + +export const theme = createMuiTheme({ + palette: { + primary: { + ...grey, + '500': 
'#616161', + '400': '#757575', + }, + secondary: grey, + error: red, + }, + typography: { + useNextVariants: true, + }, + ...testOverrides, +}) diff --git a/web/src/app/notification-rules/components/CreateNotificationRuleForm.js b/web/src/app/notification-rules/components/CreateNotificationRuleForm.js new file mode 100644 index 0000000000..fac33d7445 --- /dev/null +++ b/web/src/app/notification-rules/components/CreateNotificationRuleForm.js @@ -0,0 +1,238 @@ +import React, { Component } from 'react' +import p from 'prop-types' +import FormControl from '@material-ui/core/FormControl' +import FormHelperText from '@material-ui/core/FormHelperText' +import Grid from '@material-ui/core/Grid' +import InputLabel from '@material-ui/core/InputLabel' +import MenuItem from '@material-ui/core/MenuItem' +import Select from '@material-ui/core/Select' +import TextField from '@material-ui/core/TextField' +import gql from 'graphql-tag' +import ApolloFormDialog from '../../dialogs/components/ApolloFormDialog' + +const nrPrefix = delay => + delay ? 
`If I do not respond after ${delay} minute(s)` : 'Immediately' +const cmText = cm => { + switch (cm.type) { + case 'SMS': + return `send an SMS to my ${cm.name} number` + case 'VOICE': + return `call my ${cm.name} number` + } +} + +export const createNotificationRuleMutation = gql` + mutation CreateNotificationRuleMutation($input: CreateNotificationRuleInput) { + createNotificationRule(input: $input) { + id + delay + delay_minutes + contact_method_id + contact_method { + id + name + type + value + disabled + } + } + } +` + +class CreateNotificationRuleForm extends Component { + static propTypes = { + userId: p.string.isRequired, + } + + constructor(props) { + super(props) + + let cm + let delay + for (delay = 0; delay < 50; delay += 5) { + cm = props.contactMethods.find(cm => !this.cmError(false, delay, cm.id)) + if (cm) { + break + } + } + + if (!cm) cm = props.contactMethods[0] + + this.state = { + delay: delay || '', + cm: cm.id, + errorMessage: '', + submitted: false, + readOnly: false, + } + } + + getVariables = () => { + return { + input: { + user_id: this.props.userId, + delay_minutes: this.state.delay || 0, + contact_method_id: this.state.cm, + }, + } + } + + shouldSubmit = () => { + this.setState({ submitted: true }) + + const shouldSubmit = !this.cmError(true) + if (shouldSubmit) { + this.setState({ readOnly: true }) + return true + } + + return false + } + + cmError( + submitted = this.state.submitted, + delay = this.state.delay, + cm = this.state.cm, + ) { + // check that a rule doesn't exist for this cm and delay + if ( + submitted && + this.props.rules.some( + r => r.delay === delay && r.contact_method_id === cm, + ) + ) { + return 'Contact method is already used for the given delay.' 
+ } + } + + setDelay(v) { + const n = parseInt(v, 10) + + // Allow the user to backspace all the characters + if (Number.isNaN(n)) { + this.setState({ delay: '' }) + return + } + + if (n < 0) { + this.setState({ delay: 0 }) + return + } + + if (n > 9000) { + this.setState({ delay: 9000 }) + return + } + + this.setState({ delay: n }) + } + + contactOptions() { + return this.props.contactMethods.map(cm => { + let name = cmText(cm) + name = name.charAt(0).toUpperCase() + name.slice(1) + + return { + id: cm.id, + label: name, + value: name, + } + }) + } + + renderFields = () => { + const opts = this.contactOptions() + return ( + + + + this.setDelay(e.target.value)} + type='number' + value={this.state.delay} + /> + + Notify: {nrPrefix(this.state.delay)} + + + + + + Action + + {this.cmError()} + + + + ) + } + + resetForm = () => { + // Reset the form when we open it. + let cm + let delay + for (delay = 0; delay < 50; delay += 5) { + cm = this.props.contactMethods.find( + cm => !this.cmError(false, delay, cm.id), + ) + if (cm) { + break + } + } + + if (!cm) cm = this.props.contactMethods[0] + + this.setState({ + delay: delay || '', + cm: cm.id, + errorMessage: '', + submitted: false, + readOnly: false, + }) + } + + render() { + const { open } = this.props + + return ( + this.setState({ readOnly: false })} + fields={this.renderFields()} + getVariables={this.getVariables} + mutation={createNotificationRuleMutation} + onRequestClose={this.props.handleRequestClose} + open={open} + resetForm={this.resetForm} + shouldSubmit={this.shouldSubmit} + title='Add New Notification Rule' + /> + ) + } +} + +export default CreateNotificationRuleForm diff --git a/web/src/app/public/favicon-128.png b/web/src/app/public/favicon-128.png new file mode 100644 index 0000000000000000000000000000000000000000..dc677b51445048a0c4bf4c88ad3a34e1cad9e497 GIT binary patch literal 8900 zcmV;#B0JrQP)1r$tt zsIRLn_P$JoBhh4m0VY1d%YlP{OM$}xne)&00T+fN(M|xffUXHg{Ih`{0J{Mq1tbJ0 
z0mUvzI18u(jxA__iADIGfnNg0DDP*L4$QJ8`zHi`7alEkdEZiLb#P%g61}6q0271w zmjT}cs%%fvzt(QBcS=&`g77%Pmhkn;Brz!v5C#q^n1Jz3_zK`g;2X-b=X2wNZ?&5Q z71)52qvwlV_>Y9ab3qpaW&qU%1{lA@-xqiQ*ewNAD7d7}WRIjkpo!lTYymqsnQwhS z$lL}Go-sS&NK_R_Ib{49Xh-h1$`kIrQi3a5Oah+7_lHym@MqmV2^i*9eDE&;M*!t% zrBq*6YXh5rMZn*}k!Y$w;z5c3AaJSzQUWe&GdMUdG1BUO0FXekJ9{W|D@gmno~oLz z`ruBXUl#*TeO>MCz+J$qa3mTlki3_d|2RQlgh z3BmQvCNn+a=L;sV!N^Eq3g{AJ_|WGAcPgMr!I3e^7rP9K9UYDc1%`qS2`~Fyo(Z~a zFclplqZI&_0bd8+4M(E(r;zX_aBdop1~5M^IIrF8Q{9UJ?0-56_*%71lT=7}12|NS z(Sv^n_?Zh-JMjHhlNpY{?#Bd}O53x|Qn4(cc&NysT|z+rauRqRSQw5(BcCGT2RwU! zLOR6zcB{$Uq@brM6+loyqX88zB>G{FxPUXOQ~32CUlWc*uQkTt!FLCKuOLvY;KnT$ z)vg%OL2=V@suJvx6dW5f=#a3K!mtEsMfzoD_op5Vlv={d#+D>PBB zZ8n)@i#~1r22*iknZs?R4owo~C3{h@@!Dy^BAC^v1!d%_yD&6WtM`=TTLq5 z{2Y`tq=fcX&ucTdzS-o+m`roD1bz!Fs;{fvZ2}YjE5H(9&kh0mB_ua)vDne>E<|am zf1udmzshai_bEayyzO&omwg+8H+>3!mKclEe!*xy{PDoi4lpk%*gYk%hhbzC;0UO3 z1m|@a%uY(~3E8wr5O4qxt*@&+19&JLi8>R6@I{`}yfCfuztUlFcBiC6&<&3g=n!yy zxy>@4BB-<(#ft{|QjbYsm8G!sx38x0CP(oA0pRMivvOYh=w1 z{`~=XpuVowHvx#hC-4HW&;d#mT-joBT9+n%PgVYjp}3|Z#XFWJeos%|?4!olAz@2a zGdz;}`=bSW1N90xqr;$5F{BV>FtbqP!dab?y^?}ol-V>(kno?tp7nLLUkFE{>xSWB zTR;&I@cthJrl8ZLKDt*+QyxDy0b77Bpc9DqY_b^QGaT&^!~~dW3x3#Q5>mZQrv%oT zifhYlI)!G2o)B_H+Ij9~E;dU=yU3hl@mMgxRe--#!GQ@mYQ)clE&+A6;QD5Z-879QeY-*)*|!0$>No(j6%B-R0{9l^Qnl3g6ZwdD>jaC?(UOmr8N zH+(MtQ|?f#W#!UvB>KzIGy!`7^IYJBPJ_h!lVW5`wB`lbg!zw8L~jPu1yH zQ0WNfCIp{LNM_ms)AKFtL9%D`P^F~^aA0S^Kwx!g=2&Sdo(s4v_qnu6NC-?vYn+)v z6TO?%R=R>8v>EK15Om4j#7{HcT_J~O18z4$DPSjCa7~M@`v(=&c~Mug$NM5 zh4$t~%7AH8fJcyX+YREvpvh2dG~j*9Wtm^`k*P=uZDu~35-f;G<|HH`)ps>U0&N0r zF1LwTnZ!-zy!$5v7kEt{X-!+{3hGn66K<^%))<*Ku)BK}*~AFd|HZ(sY~bLy;G*`i zxBUZw3HTH!1wv@4+E(>t;XRL~puvE(hRbwEFvS+cW&b8p04-AS{R*A;$mI_g*d-;n zy4B3Y{b`1Y358#YUVhSRewSNI9XwRb4@aV}j*15Gyx6NAs6;^B(4~^{wjeDhT?z99kv*BBeDbRY72hYYGSGGX3GTvO7s%GfP#mM+_a;* z4v6&inZt#9fJvr;OWRB>c@&^8aC4%Dfiy-!z<-ygXqKuQ@d3(Q_)e>dzaKoSKBAkJjH1HSA6bCQy8wHpP>jiN)_vyS5c`z9sFcFDYnFMy4v zVvVWN*6_w~B--4c-;}T9`_c936B7bvB?XtYO5%mdf>DF7cbIH6Y}T17ljb#nQ#vIb 
zLlD(s1%D}Wi0jN>3vlm1en&pUulAA|Cn})U7W}x`q^J+aV=SGji3|!Tap8xpCX?Lk zc}rZV8A4)B;??~F875i`tQe}4R!3Wl z4siijm)op06j;MI(h07w#x^SnD0A#D$mqiUl!xg-PQTItAQO zYO_VcWCsq38_Z1#_DV^<(4|=BS9zZ`sNlX5hor6+HUoF$lRWE5{CU7LKpE(i>ebCA z<%+E;brM(8lzqRB17OAUXY2v%UXB@%B?dEnj z(=O00*FmMbd8{EgegQ?VLeH(GHny%g5z5p4w+rq0d&F-<58LF$IBDXa2HXw!0nY0% zIH}7(ZUYb8ue4fRVP!eASi$#OEK0Y-j8m(G{|!0Z6LNXQ?^5OpsvVIRqZTPa*r#YP zvgPLvsr`XHQ!+2Xvw(jE9bWM(JooJEa3s1sZwBz1IUiLZ%Ut+ji^={8*{ejkwN(Ef zOB|joQuuQ0}CqUcRmKhS#C;7*OE|xS`o%M`w85yrZE? zqn6?~B~IqxF0gk>var=4k;gN+=2N=jWZXq0qQsY92{ob%*-Y=7=&SE9Fxuy0)Q*%UO$txtRd z_|R0`RHp5p$nk8?ZI^V)!TmeOyyr^*7)&3B?TtycAHp z7EpY~7L<9Rh!_LOUu!C^DYt2r^y+y1dWXR*d*HdkHVHSB+jPqQpHFzmeI?*>RF}jU z;YofIaxz|fnhB@cT3b~poh<8V_PYdZF%ferzW6*cF3~(NO$%g`J5CY|qSC z+4IGK>VJ?Tq)OCMJnwf|X(>$Lu$W|Fn=z~o=7fMHewW=-f@zKzwCOaxyXQ5(PHTT?DXwZZ znU@^cv?<`mGVRp%kqdYM_)$0#T{IeII4|(>1s4Nnqj$`u$EXB3QZ|sp%I@duL~pkI zNjMVisIRL%26(~&aY~ou%nqYp$r8XS&v(eRwFkr{m$XY?GYgnexo3WdPp0BF~b_J$+ zuVT-a$#v+H=vlgtfVa@oJI{n8(TzFp`8062U%_?FCesE;o)js#wbbFcfW}O@D>$b^ za^PSNmeLX&6Z$>2MNP@CvSVh0r-`)|c*U~-NHO(evib1oNX?q$RZQ={j?MtQb z{?q?6575vfDp)lAK@RpbplRD2f@1CsXX&!if7j&E6Q;@yNJL4AuW$S zN&8E0AcKK^nPD(cz;s6!mOe2Q=zYNVjkamp=1^Z(`?!MR9N>XQ3v&PoHGsbdU1CCU za+lG6Qb&MMQBK8iM^ZFzZ=9A^TAp~C&L#gIHbJ^Gm!j&d)0pw|YxJ}Bd`}_Rc-75p zzV)6FF`WV2U+nN$k*mwJdb0QV3Bv&Ob+whiG6!aUJ|Vcg)ueNfr%I1<_oYsi?cm#K zI@7ND4rRw3iL|;axx}iqZ2tBC(s0M`kVQq^)OX&rZ|`8*kFH?K6&IpB!i${u$^BWw zy?HhXyJdGH}%(4SM3g5D!}@3r%9zdI6+*|zZ2z~ zHig=!|3uNey%4gGZCMp%lr1;}-;5n-``7bBmH}lu)Le89)2{h$H$typzm7%EK8xcz zR8>vNFhtSpdSXl7Cb4og$f1WF3O#!Fp{3YhfCPU{b=Cb1jZLGngF~|IVIJOe@ib=y zZRScL49U(9-hnlFQid?8REnibm+;|=6&a!^rKmdVR4UIn8Mkw28)<#hCo}oVtmG#W z2`<0vI~;b%!CZRD#hh^5F?{p<^XTa8(4QwIGw%4YCjKzBKuqdhL62Bdf$5`#{Oy4O zP6FMGFrscB*JZ}7*AcAW9ih=1wFAINSzy@^;${7pxto5?U}nL;MvhqMUoqS4bohSeAv)=i{M= z9^``azCkLL&7<2P*2+m!E=kA<=%Vl#x%S88K!CS=9|k{y?S*< zjh7vJxQ+;h31zOJJZm0LnGg(6UspRD@FgV79zlUdrBW0hFb}h$JoB@wueyp@ERM_} zZb?ZAixi;N&Syz6vaDqI 
z;hKWEDVdIZeD!s;`%DA|*vrji|Av{JQwpOvNa*1GGJpS<#~!7qxTxAVEt;Djd0sKXmB}?AOd>`C<4^(21twE-N zQ4-92+uGWSZ4aFAbRDM`Qw-b31*&bq4%tD`{l~io@T2$mv2#iczXe?>%*yg^Dn1q) zIJ7fOllAM@XG6O@e1LRFmxfd-l?es=W6a4@CU^58++=cCiKh&)1k@#ETGP$&M)t=o z16V*n05yZGlOJ3>R5wBmreuYu1+wZ@FxX!y$3-Qw(L-@@F;Yq#$6?yEX&iU_3Cus_ zQ15XRN-5^d$(m^ukK?v?jtF510lQ@-r2^foGa}fQo1J_>Py(gS@EJe|f!(yFn{=0y zgu1DF!!R-g7eZh=4pXL1$!d+pra_rTrBbA}Y$51LcSU6-fk1$%Q>OC3gAWl3>Cb%Y z&1IbXwXb3u2K()w)zWrT3r<`6h!AF@lVzT3TkdJl=JBcle1I>#>U8+-u^_c>BTiSB zuJ}pGtXZ>&#bQ)eRPxK)e$C6TzQMDLUf`?eoJ%YgL!p?r&%T+rh`;kb!Z&ciM%fNs zufCaa!fI+JV_6oLUGY6ap%BWG`q^{lGI#DiR9027%PzZS@)EJ|`?&2LBSM%0LW=Yb zAwN(uQX*{gM3D4`Db)xVz=JkF6<2cP zf-kV=XZK>-^ywbap!4;&Fa}A|2t%^v507-qDI9(DvFtQ+r(PNYES@Fd7eGgo@en&=a~XO(|+?YdPc0vzRhW8al{c^^sP&> zIfRy0V$oHDX9J}q{^1&$A9*s9MEdUKS74{QcW1@paSl4@^IUMjg&FB@dHi2=EPg{r zDkJusMY<|zp{KgWKLd;uglW>bbUF5>hKvV($)yW145L@*r4Y=Ye~4xbA&9^C0jZDk zo2M{JOW1VFT{xXxIy)B#u(PM;GruDxC!ca^=4WHq@8bLsQK{u<`1j^6?F~4Vc(bw!^9~UqsXI{xqyUfPRmuwDI>& zwc~hZfH)9y(e>`pXibtOMXWyi61x5!?w>fGWN!JthuCue!-&ApD|E6b$R{`4#+nPR zr1P~mQSpRMD+__s)J)4?pJL^4XVLn%XTUeQk+y`~O`_IF-)~#y0a8F*p)WI%jLy1Q zdrK=H9CjKrZn>68XP?%+Urz|IZB}3S16uz4IGDpczETLwr1_C2X@2zY$dV$gNmV*m zu&F`U{k`(>X!o9{FWStp2;-Ikl0cU$^Z=xlM_YHk7!pFV_QES^y6+)M4?PHTY7N*9 ziIr<;{l_8_tJjUFTel}OxNRLIn%leyl(Fx!%ULHY#ybPF2&MboOvR|SsLG-uVsA%? 
zy&cKs-#voMfPZv|pJP@qw_batJDPafR@ClCz-Hh*2|OKiX%`fs4L~>&{WGvGpx_n1 z+UnQOju-exkxL+3Fu8ZM7;Afh9|3GKHJ2B7pgXh+_`u9uBK>skex-@R08as%lfYks zZm->cpLXwmt*L0z8DtNTWNqhev14}0y19;1& zz6*Li+8;`sZE|P9_UQS)>Q^lFXBI|28;(RbO+*clUO)GX^diW0M!^?I0uL8E>D96d zxN@|ki|q|=2U-#W9w^CNe*CG0HKt;1RuHZVN1`83bOu-f{M`WF@OQfc^-~9a1^0!V z^i3yC!2gX!eqp-;&lfl!uuEKcsMr~FZQMk){f`v8gsqGK{5c$nuAA@-;4wi5Z~?$4 zrs7q!rd}_kNA_e~lxNOk}c7bn% zBhmP$iUGorXe;o&wD$jXsl8RMt`%gje>2lBmHm3GuA$2%$Z|{ZVD{>~H}E>A#>&)e zx6sho^j=MMRgHvw8x7b!C7J4qQERnNuQm6hcljIWE{je>(c)bKxyev$G87GlqD|^M ze*H>NR&5dLi(M3)~uVXplpDcnQ27 zaA}mIYxg9u-h?J&`06y|!taZn+$(#}2}hz!wgyti4FiND(G9>A0=+8!<}!Q8W92oD z7}c?=To~?!yApV;*yZoR%+<0_1Ap3@38!%b4UJ82*Hl;80{&mK1Pj63M1DI46)?jV zEDpGI$kB6tloz*uxzot&BB-8bWTUD0Woahl+W>rStYZH0ZUWM9E3ixgzbkQgF_?ef zKuYS{Nad(XKJ1f}Lmt!@K&N;0do0`LePTEg?cAEk9Cr-hA?SD@R;=JRB{rLl{M4@v z?3EPE7{x{d6F4*`hup`Q68cKGP1)O1|2rIsF5TMr9GBTuuOIFN0m)VgKPz?myg9tT zSo@-Oef!2}B$OsO4EY*b6L_e|A!=oae@8eHy=7bAa@M-0FfSoEzr)P?GDr(}(eLnJNyg(3qf@;d+X{!{ zzSp9ry6PF=FbO+EEInX4H<_PA?wgdf2)&_XsJ5_J!4<70R-W;MiN5dfo=|2C{uA^a zfSa}@{>FWU)pH6D2i`S+e+69@1>D?4N&`BDU+pxQogDlY@LUt(0B5xuOwO|~WdX1I z758T=e4Bxz!jb6OZH>PP(QVbx*p#fPu6hDEO+eW)zwV){PYtXI(xu-66OvY`HGz=_ z1AMdH;LwE3YoCFw&?zYZ00LM^L_t&uJP~xcJCvyuYyswnBhh!Z2i#4VcDshgrnZ{u zs=omzNGN;Pr>JlRyW|=2?VFTzNwfdb|vC3BOKwWeaNsiilU5gedC zDVURoqbY%3mD)UBlyUePJ;`6bJ>hJ^HQY8dHZ^B*kw3n@KI;b)VwaGP_Sz?|O`K zP(m=>7Q_U+>vQ?QQ!(C_69>3I9EqN@J>zJ?cih+4)%t+%06$4LfYdnfpPdGW#Pvmt zZV#3Cj!$u8nTVNoLxTQde)` zUhY@iTxt(@Fm*lfPhjEJuId#SAU`H32TliW1ElLoZK(@W9Kj?Lj*iC--S-wMu8>3GB2dt1T=>u(G&$JKmiI+fC3bt00k&O0SYj#;Qs?mQIx$a S(ZFl~0000XsX z>eHNbLQ!&@QDJhC5R@tXxTr;oiij3rU<;!#A`qj65Lg&E6qS$>x^Q74xM&dtMW72O zB-7H-a$#zwLybS9GxOd%Eu0b534*@WX!+zf+x0ANa{{$AA>^#mwBtn;JVm@Zh1RxR6X){0o z0uoRbX4IK%zJhPUFEP8OTZt9SN#vN4Nq@A%_Oo1_lU<(8>l$wOY zYYa`P#$fw_P$fPsn%k!9-Qy0u-&I7j=WA*3aU1#6e408{P2DfL$nQBy!|BCzCC}9w z=krbK@;o)$U{*C0M!zQ?O?|JCS$PoCrGj|O~(yQ$AnnlvU@2;9r`DK#_3X`L;0}~_&DRx?AI5? 
z{@)UiYBHQz;kc#gPv7{*8WLV(A{4ou7mly;>R#2HS>V*OI6U6HmGcMnfAG}+-gn{v O0000RGOjuhBy0oA}{+K)J1!<5;pA zyrTqlI8)iOC!@La`y&Zl+}Y)DjPF)>{Mj4y*^ZAKUIIX=Wqc_(l-!Tfj7=ZWhbV{Q z0yvT#tKxXi@@}4X*S{T9>eJa-jr3gH{HZVfW_{j$$hOrgEdd1KfBZjA)4{5=z6iiG z_4)Z_Z204+OJ7})Z|juE5)w~z1ImdzTL?yQIf7t-C;T1Y4M&|`00f6U{o4P016B^j z0;mq$r$m9(hww(?Ljef9nIv0?Vwiy>5G1{bCH!r;(Ot*yeOGS}yU>8&(R-8tVc#5} zD(z4!co*yfe@M_<1~eQ%XAop&6cph=W>FB#I;4&9tgQ^eyHwnc3I!qCa&v_cT-t5l zKqVvc55dT;jykS#2D7OtDqb&fFJ@X-A==JYQ|a(T|Jwl)$eZA{4U_~lUliH}E^Z8q z{X{5#07^J-MjRwIMY}gpP0&`u+8cp;Lw)83O8`}^hJ3(xaN=x>kE9hku96vhT}`qI zWb3s^zE~LH_~hBmzI1zp!AuA_faU)NsF4}Me@O)u-+ib@O@bE1?d2LL58Q-x9sb3X zHGq;Y3I3GTqJqT^FYQ4cE*O||dL`mlU@%acOIU70Pvdm?=Bfa%U1|EW9McsJh-_OV znRgeERNev!CA$tATVunVLUXq;YvUnif=UvH6xr8FOqaLz>)~L9kj#)fRj2j zii_a>y{lLKVK0#Zav=5}XM0Cq(l!)d>}3LV`Ws-Zs@_m*r~rH~941@v*`Mm|7|d9q z8qS;#qf<&Kt`Umqe)Fh%XDGK4gn%uiNsgfqRF5k->`jdK7Xjbn^)Km#QJ+)pno--z zzt``0ZD_x?{Pc!T!Kxpawea%TOZw}HIz3UUH6ewJ? z4`1#$-ws!uNV|P?nI_dkS3IDYhBsDvSGHO+>@<5Jbf9WrFv7E`$Tv^;ThfpmhavkMkbwqie0ezF=gd-6-+j2HZYiM)JL6zpsV8!;TFRdQ~{({EE zK-4u*;yrrLTVUrzy*`(PtB!BoLY}rD1Iq~Nvh&gJJA8=D*jnh&W5+51Y)n5U*nR+8 z6JCU-5MGJy$=_vsfve;-+BA0yHaKsl*9T0bA53v+F8G;h)%VTjB&7w!@2-)iNAgX{ zdR|>@POr~CUW*ZcVU5j+U>Ip6&5sGQs!jaDVGu^7WhgK}Q=3#s7+0RP-Q`ryZ(K*{ zn(HiEf<=)BxRyY2laKOj>ZUaXN*G0{acmV568@`CWbu|LkH4hb&Who1O*!Y>x|7sp zc5cF3id{zTpZg;=^FxJhZK!Rcc?#C>I_#NO4xtwSU3`|py2!gy z*3AXB_)n{2PmT?|OIn@uPVsZ%MNzl?{sG0gpXTh3EMj|M>qS}2k8xk|0w!n%>eeA= zGbK`Ik>mK5E;cPG`&sI9K(=BdQL+=|1Rxme8TMorikTfHPu^;6r&}CMp+R=~(7Q}EnXPV+4DmVD@ol_PiLMe^#|v~*2$D|GQy%xPb+8Tbpciw>YDXO+ z)MVh9&m)#2&Q%(f(&#T17@#LbTRo0 zZH~>6Lx*sXqMu2aO>jEG9p#dg3h+~RFXLB%AJd?pBW%G4o8KC~#%qiQ(7#G53_RE= zN~^g_r;QoN6N&H6g$DfBJW#|a!S(wgmg2?OZBIYs8Zh?78g$R{Xw-8;G9xk_t$2yw zqYEZer}xU7@+7>3-i6Q1NW00KqT@*m8*Z$zG{WLDbbNt=tIr+F@LtO1$&|#!OA9bANjWRwu5RsMlY$V z>NnkIZH#d^YyS(;t*4OOJD2gNT&BMVLGIoriy%fI579{Ei5VaBHwK zd(LGQ2rFM@>4(ddzu?JB5H1uIC)LI%6)ECa6E$3FUE5owFjD)dl8RQX5~E_M&+2#$ z9kd_Z+#h;MKT_gl_f*dnaWURIm0Bd8y5*Gt#DNJeu%xO? 
zeTA=WcA+UNd?Mb)RuMM&89-Rb)e!+D z<@e~EuLnlsY<{s-`EfUr8O@hkR1idHLqBKS=e6L+uP=JA+=st70jp5=ymd9iCcJ$7 z_{nJtCVws*RkV*$Bql!Q>&LY}zM9(B9yd>&fEH@IO&8(U<#9@4{3c)ERDN?^=tm8M z145TL_xsxW>`S_ys%~k0x2`v9iIm`oJoSy%8QdZZ=Mn0Z`@OW;$E5r}cEK+Tx z%{TcJJVY^1RNIs~d3$gDEaS~LVFau2hZnKWyFqq5m&}(W=%@Y(m}~~ zoM4u399lfB<0VnQXMM97zv`L1$?q@!b(|y8L5L$K&^%*sZ8q7SughB& z&6mCUKB#nzUC8%64_3|sTe~2`cg-;DI>Y`TjX}c>2Cwt&d`4aoJdM2P&Nd`x zd%DXiMj%&$NIyLEHJCp5+htuX#!sU@bfSb2l*x-QXB0!W2~_z;k&o7>9K>TB3)Rm0N8c5vNcxh)SgC8U;9 z89-Mb5ZD7n?IZ9QOa2`Asq}YIDOI;DoF?fvo z9Ccl-ha6Lu%^g~lREhi6MO5yx8a8mxRpB~I4u}@m$D~Q+vCh}Rc>~+#ID1KdG71W@ zR1iz~k=_^U<6L(SkmY%Z_rOihFS{a^9koZ%Y%YA}8X&-vqDzQe7YqLLI&G*A76Mgx zD41}nqc#2U+Eb=MactsS(r%}x*l4BsZu;x?cj{pFoclENYro7$jG$9blx_{onXK?p6F%?Ixf;B{`pDD8+evaFDm!ERVi}fBwB)Q zx7`%C*U#=y8{r;6XE<<*szEZZvBp_Vyz6QbET2A@lUnmv%+S`M;1{N^W<8b~AC_SqJDCAdZIs zk^ly!)^-Ybi_&N)P^3@E`ZdIORQ3^;?qss)-MDQ*f`ks$kT(j*>7V(qVqYaW_sooO zAq(6wBEphF7zpn!L^6hN(rK0M7 ziM}V#uvT8j{P*btIFZBVzR5tY?4?7%<^l>ltm>v=B_)|Zfu>4_0SK0 z+!X7Xym7kV@F3egtZJ3D%wCey=@{As7tt21l=bah5L;hY4ly^cLCkFkUx_c%UiL#H zMVnt+F~>e~<{SFe0cXvmT!R^XH9w#0y=}%SBd5@mQ}VM z0fs5(3%{s;f4aBgI7=6VLT1*~VQ3@a)22LT%7gm9u476V;;Zlh>3ZvRbqBa}_Wslo zeeD@#%fpW>R!te=SH{z_PgTn`A-?XZ7(+83^sZ_>)--?g*;=yu$J&q(m#pDvrms*> zyC+<`By!lgcSjm?#c)Zoy~Sjpn@ZsNN$zq-EnA;GrGJIZG2=mVylH18$m9ffnpJhs zUZPYs=)A_&Z@TKHwaHFjgx`VM?FVdT_QJH@qTd|1$>UU04l;`fR0es8q4EC%wNc{atEumT7z6Jj$uDnu((6g`)PN158a3p zO~K-50LnOjfjn(xE$)-K$Tvrl@3Kc@&%^K`ePkpiX0Lb+VO4j_tcqhech%=CqsVQ1 z2g5Ie@1DDj)0q}Fr%1$0;t{>LusKWQsb0N!$v^xuAVRt~)T*z1uO?1PSH+24IVH3i zO=84v5mHop&qvNeYO^ex;Uq+HCKQOTf^(vn)DGCEmzYS>w z37y6hYW&B<*bn`tZe1cxkol6AnGP$uB{FYQ6Ue^QzukSEJ%2FbDhS5d^!K!duw3TO z#8o%NLf*_&M5m%bc2yeICI1XXeu$G~z6~b*UW6Q%AMsk#k1#N`ZwnLCeVp8TaSd6quzt*mZlzx}mqJ2`dmf_bB7tfC$at0X$g9@EH-U}crr zlQJb)-HQ@|GohZ^xKV+>5b-d5%bOO9gE!C4iK1$9ug3l!Y81yh-lfGhPjt?(0g%|N zT|H-Ki2YkXq@$C?$hnfYuCLH%x?;aO53g>BYtU*gHI%2cMV#=UD-yfw088~&#V$LI zXnZObCx3q;jeRO2NV;hekWX>eH3w(f1<&&b|4bCZ^19q;Rd2s 
zBH)wQLoSfGBBl4oBk#Q*!uOrmim`l)81gTcFJYQp8?(>VtNVGVt$P9Q3%;~^DQ~8x z!&}^aHs>qy)&sJ7@FvP?nIb>s`qH)s&%f`>)(a%uoLPuSVT1TJ;lsZSZ zcX0#Gmmpq>7s`r%k+EdV__4%@bbd-_c^9HM^mTP@r&gOu>YUFdV~2(cgxnV76CZfY zL8({>^l5wpGFbJo9bM4It(8+epKruc63S%lt#4 zH91Y80T`RiR~e>yhxgAdwraP9HyuzI>nP%h#F>Kf|1{lutLE{q2#2|~^5T(g&ap-E z;nZQ9bT>=Y&vK~L!oTt91YlJkr0Zy#>Ix4GytN?k3udA6xsSJSWWK=`mW}#Qc_)@} zm4`%60~xu3S031w+SBj-s^J&f*lH;noTh~^qW-K0zs8>dBIYIVBIIsHu@VYLU(Gv7bKRKPrY3 z-ZYWPHdMDiYobM8nnyJnCHEg2=hlEmhM`5}g%)T2Zh-!15fh8D``?n`=icw*H!?C8 z{{U#kfQ(4K#v5yq-@Zu^`t(7n&7tVUih_1#rNu(bjF~zXC+Yp@z)fp1O6K=58?*HN z3BRIgI{WA^-Sw`rnKjQp{;leT3tnBHtsK?@WWksWSZ5O9Z@^`6(oQXW9FYVLPO8ng zng6u5!A4i!;b9wa@Dyy|vTT=#rLdoI&IryN(z> zLx@v;_*lKO=(z;!KQDcjvr@XQ_DZ=I(RDAS^!~-^@fsS!GKjp1n!O~x*VT9dj8Z!V z3(2@13x){IpPJgLAOs$QHeeEHymOF$w@g=j3U4XPbsIS6MIDGH%(I4qmlxMLscAX}9xKrO(bGMCI5n$Wi{ z3Uq8#`FFr2q<`&^U4y{Hq_<=4yE~uNXiIL^Tf48HOSRM%q?--n{V3Jxv&YQ&m~g{9 zgN)AxAXiB@oK>OzD!<>E&X6gyC-ihx%kr$!Y2JXPP#EdU2MnU(@!(;{W1=ox_-{axfAX8g&AK1*mLJqVe;GN)u!@{_dRU)P{CKky5qmeJ6%$_8;n@TtoYxD8kDqc|YbJv!5nG^b>POi-C> zs@(m!?dv{MNq6AQ^}5FH3jXHah8B3%pAHcp0s+PWZ3QH?A9fZ|jw+ zA0w^%wnH+GX+gx3zQL66Os*ppV`?ULx0F^{?&MzZHF;lF7)F4Ue`Zztm9-#`S>|1; zd=F|H!zk2vD=>FxhGpTB*%s4rV9z5ozV$1uqd}g zw)hY$OZSv6h<42}>X&n_t=E2KOpd-nEke)k8ktgsM&LUIQK`@=sb@zv&v2)aRvN=+ zF!TLPCHbZS*3~bVn{86$v98sY%AWU(HBnIw`Q?FesjT9)jR8qh*@Nx)e{6V9Jz-@~ z#%F1Z`GW(-P| zxqNacq3^y67+72<(d;7pB*PnM*{UiX%@c(BPh#uG$epS^!JM;-dRw*Xim@SdPCZ8O6MQT zLC@3KR?~MkY);%GL|%#((s3b7wyipLxSHy+yuywX(fT7Hr!5dxA-N;bVHmD>DvF8i zWF`D8Gs35p5NQLW4d>f}vXgHC>Tvl9jLC+#+QdQgdnf}M5T_)x?zgF*r^prI8uDs4 zltZSN>!9@&A2@Jp?;R0HGW!U%r)RiK4#V`x5b8iPh-Gh-pLD>bOvkRMs5Szr5B7(k zTRAi`F&x>JvGf%_jPP-(`=T4w_0vsq!f$0IGwxs+J%jF{u1NaveDRs$gzEH~&@t#3 zj3s6>tQZ0$b{SM+13DnxEfe}EWm21`z>-b2L#@C%*KM5k^#85DVg3d8t-*m5kr+hJ!4q9JTQ2yf`rKwNHyzfA@{iGh$;Vc z(ZpMe@5JuYWS1GJo8k5;kmL^y`Rw^q4DDxU4Q!fUE(E} z0Wg4#9lt{OA>FBM7#KwWacXI8uP;f0@NX!eeX_KJ6pfEgGHjk#Dz-0kQSymz)?T*1gztTmE$zR_v=gk1If@B)cU;R%cHHa<&(J`sPmSt`K=bI%Nl 
zX30T+H_?|V8ERA(*dSfb==aGBcV8a4W^QR$I4;_*(*9+U;_s(Cl>foUoKkrGS9U(= znA?JpB9SMEG+bJ=_QW%CJ%R88Wk^$>4-w95F=HeJGw7+^U6m03KA<*|%f-X55?UX`wx$c3qbi$GML)B|H9=@PcU`;3EJl zC5Uhz;TDz_hG2I-Br>>WKW>OJn&p32HrG(mdyF^!xJvX*J8j^JZ1|mbARYl)V)5=w zZ*j+*<*1bblXcLIy*)>#t;B(ggTwBAhKbcmRc*3TDdd9}sW(*{UB5XqA-U|+OX9A# zXR0hRYVlmF;ia6*&k3N3owJ=WA+m%WoGa%g0Y{dErCI|$+gQh?)Sf6c_PE&BgyB3nn;T1bndY*CoJPpQ1P2ZInvZCvXO{TeKH=+4mFbR8?;8cy zdI;6sdJqvu&pjEI7wu2%HLus&j$1txYxVW4 zXlpHsKIIQb>LKGHLAxgoCkkHdgc$c&}j79x%;L< z9(M~#B$QelSNbV@HWNOLmKU;2nW)%KR?ir(eY_mN1Jk~%Mwld4r_4I-+`+tUiX3@8 zkQWFDuEk=(wLT8uxRfNML2y{A*)JD=z{yluEdPv#P8cuO{!}B}wx6E;qEg0}R-Pww zx`!vXpD2?}TJgc5{-URO(a2|l!b@n$hgGtS7+9sn{Nj zYh^g#`#xl|m}6HE`8T{lw|y=wu1HMb)a8-kyMQCzCF8M!iR^FE;e2VIo{!4NR@yzv zr?}G72(=m(85`qLLL<=N7B%qu%`u^pq1;@1FfcAXa2JCzaQl^V z*xfaiRTx*m_NTTImv0gA4gt-UuTs$W>;2(R1g^c93NXeT<-^6 z;p{vhebJ`mpOGjH^Q&=@xMEe=cclgl6zrm2mVfG7cRyp9BQ;m7OwU=oD1AEb%s8)^ zPg?CI#@z6jGH!F;EVmnoqEfP5jGL`*IZTZ@q&oc~-tL%wf4wi()_m7GTW!)@8IoGk zLuhXJ`7^Y*p-9gy+G3Cp8M=+>8do%Q=db%wll{4tcQMtzNEIVkZfxK8Dk8)Tizm!- zjelg14EF!k#Ab&|6e=#NG?s4PfjpB~ZS6C>JV*AiNDQ*bnl}Z#-A%9h5D%5ZL- zXlCI-knBaRw!bb`Fhk-etKCs?C9<-dx!|aan0;=Gj^+hVm29>$_aF zMhn@uX^Xvz0Vvx{uw6V~DJsq_n%TRQQZL;Tuwq-``;0U379Bl8snkSOqXIFGikplY z^4+s)WCSzh%KmH>Ag8dYn5imDglG`yY)ep<_f(!}$*1qh#Hz@BkN-}dUe+^3J;J|z z|GMQy5a!vDtvVHX}$p>QmfNR z{ol)T)dcaa&sO=?-N*73=yAiltTSV5Q;QRwkBeUkGXG^urY*s}FHp;rd*_ne1R{so zsHlhRafV>9$3*jGJ`D7g8To`;%_JSy=Z+Fj8>eU(tKxeN4SDqR!oof*7jaM{T#boJ zQ>PSllrWI_t~7Ig(M~Q9?{~e2rKBzzrg_*#rnzBb^TzIP? 
zD}Mwvl-_zXiTAc4j>JqI;J)U)-O~I-+|=Uz!E3I(I919aelaUWT_}R;`BMJ*D!unI zr1yb>>h+iW_sYuiD}r!r@?c---l<UeDY8Q!5n13zx* z0?KrbI2~5T*QwT1+D32q+kVRc$ah*D$vG}Gy`Dg!PfF}P_qeYw;;$)s)vc1&Gru*3 z_ETj8k4$Tes;Xj-vq%#}y(PtwcyslP{reTc=W25g>tkiOO~_Lm23-)n&kTpAt~&)G zJ=+k_cn)6*Teg75D~RZ^Qab|Mv%4Ye^ilQm!mFqTr&)|?^u*PAr%F5DkyUA)gO6vX ziwv^XJLTidjK^-z^Y%lU2($C`_P_&=d`7~mzjg*c_*{a=gu33~sY*IKvs2VWGBX>k z@1!tjez~~tzv7cfddjJ+`fOZS?1qh(V2bm zpDnjz8~Z(~9uk>#%p`58I=nNlscw1oTk}QgcZa9gEBXf>Ddh{H{fVWGKWMu4KZLG- z(b?I5hBdc{xCepg996YoMyn}|nyzM3XqP{K!pYF)5kgUv;| z-KblCd}(>2yovxdFS42hGBf4{;%1I-uPnur30%)4nyP7Qr-?t#HN>{`igMYXr25}7 ztiIlsB$_ubs$_off(8T5m=i><-&;@SEKJ#-^_-b}$P1oXG8CcIDh(QCmC+~@#|9S? zr?`6K5sEhIc5?jkiq2917za%9laDGUv*>hfp{SgNlifyx5EGwHi zT7U#5%Un2FxoR{eE7nAau(!8|oJYt65q&pu9Nr1XQ~NXQxKYcva&<#CRJLiN^ofJb za!j{3YSLPnJseLe$)qtaUN<;myhC*tF(`)VIPqY`cS0=`a2mnA%K!kok{g8F*ciC< zjET<~JCvlt>j4x6$HTOC>&p}Lw3(sMXfc7!krqQZlCl+277nBJu&m6BMMHM zm-O#KS&mHdh<*OdSt>q7H69|sakkxVY1hHoncqgoXZO;840~_kSE)kEtW=r8C49UZ z&xFMp|2@xgChn@mwC39o z7yMZhr`Rpj)Us~mFDsOLnfC^x0&^&y<0!y+Tx!C9z*&A(KLI(hcySuL^oG`>8&jh&OZ z&~0sMEKb(${7I|kR372!PTcQ-S>H0o9@i;S&D3RcUpY5Th8T5dCIz7se~ZN zJE4Q%TOyz>yg#>kw&RA&!hPFyMpV9bWvpYRgTx_kL7^hNea@5%ca)^hHk(c(FUx>xc@|3f7GyDuNk@0;0Nkzv# zr9%%eD`7(okbVLRgEP1=)Hxf@mJ(%BF8-^~654lQl^hixmSXvY*^gL zW{ozWB#Ef-UU{t~f;{?5n=e0B%%1QUPoP;60HtE_?X+Y;(mXVWTLlXG!FPg+7*kOM zLLoG}1C&h|2n7iKsJ>-pIzD&*cjk6_L>Gv^;6f+_fa|7|Wrv(Em6t&# z&>M5n;E8Ks{MY0Ji}K!>yCawuB!Uh8HZwpOj9})cP(O%DD}|SafB8uvK@|O-no|F3qR`bWxJ*t^##r(8Wtnnfk>=1dM3B zSn_Qu^Fhq9!)LJ=qDj~+>^0%H)oxpPq1`(~kZ#%G0MF8nRuM$-y<(*on6^2tWVp$Y zp?|8uh{1541iTZobr#Kd;WCV<`lOWc%q<2zBaXFzDOhS*iQ|WThQ>!+bRvl0U{*tA zsDZqOmwG=3#j~^C;u({_O8|xvg7xDV-*7Y>8@_y~z z7ytqg){SveapzH=*hGPVg*7A?0GY0@?^nC}S$_fhPT ztFF+s@x*C@X9r3_=`so*63w(4k$6N2I^W7zYTn8ihY=KIu(j+CaDfnxJ8fa7HmX$g zp$>jm63&;eq^+vI6^RcN2=h=nuV*ZI7a+)<|E0x1Hv58cC$F7ELCL6u*48TZk4@!S zU*ub$xvA+IkOpui(}0mee!>pvMq*E>FZ~DBD%L#y33kUX&kUVD{2n z#a7GKEGDe>%nh>NpsvJ#s$#-9sFV#fC81`UC<$k^e)J-o+b-t(x7lyVLb~{|5X6O% 
zru4Be&h`DW%{~>JFK^MkajBumc^!owt~$MG=KE(?>c`MlxC^*VHZUQYbHe_ERA)Rn z(wY8!Ts1hAvvUHKN7YBkwwbRp;=Ac_dKS7hj59LIzBIL@>80UvkgG~9FzLe)>I=&; z3s(D7IK0G+Y!)@Yu36~%B9!5xN`D|u%?7ps5yDmvZ9L4AqR+#7#!gQQ?k6@g7EX)c&m}^RzE*r8kw=K=3BDo z2Cs*3Tz-ijMMCoTdq&-k@|D&Dxduf1UP!{~OvvJ2_Rk-Bhc@0`6{n2ggyEyr_LE}E zrW)8otWG}{3Q$pDDd41;c|waz#=Bfo$~aqGrvHWTu^8d0<5WSjC%2gJQbx-yM2mn& z3-$g2wo2L4-avs{)cZdSU*O(}CL*-xXZ8ME=ZZ#V0;&p0T5XX6$uZJCw;`g?Q%q1< zR@Shipa@(UG(A1VJc7-r*~R)sghl`%=i<634B<4Kf< zXIrp41^fOZzT6L5*e8HdrGG9UVeLjh?q`~V238S7o(KdZNXysr;-+er?O!?6A0i1Q ze8c{gL&i>CrCOVzHZUDx*h=uPS$K~;ObP1fsqtp20|+XaS^TY{nBdZv{P$j>3$d&{ z?5xG@>+l-3!gf)rRT*ig2>54s{KZX1nh%^QEJqU6(><_Q!BnVTfc(L14tw)5l`X9! zLKhAa{ZxduI^|EIlR6Uc9yeih?(X9`W;&~7X`s0GK3M2hY zktUHW0NJ7ht<;S2rQPK$p%QiDWNTuSFnZhf`q;-fHQK3q5X+d_SU+nBA-d@q4!{&q zaXvrqd*vJ|U`-Bd8w?+te}svqED{*528EP1X~o}tKpq?4cv1ifzZ8=b$`+Vw34Wn` zc~KuxSl0-KnZ4L1rtFRQA2LJuE@8l--JCnm16JUL2i4)^;p2VP+-5_Ea?BpzlaIdV zWaRMWt5(RclToHj34ys{DE}cL7iikn)Osu4^K`F;Cc?Dj48{{7Bsk6{>^7LH9fFjA zbU^fb(?zhd+})FYu$Z($H_V?w@0jf?@tCbl;mqM21^=8p$x|z5TD5>y`!n6?7boG+ zMA$1q^6zd)kYK2R=1(Oq3OPYNHtJ)car)3TG=0_m z{u2|mzH+Q*0$+LZ6ij$lGGVcu4L9?E zMb~}%heRo$MZa!gkvyB=Co#$Vw1l+UwyoqkNiM@Lpghb1e_r?X=$mvt-06dH=@=f4 zX{aI5r3)-cY@Wv2VZ6A3m941t`0sWJJ%6dX#ToJp92vR7-o$Y<>$Z3DoVdybU}@vN z!&NqW`Oqg&5^k2M7E2!16GR*>5Y*NJD-IwySFl_p%sj9Q3-elNE5N)lWf@v%DF=+Y zB=%J^X<#l^=t|~e(&+m^G6q|E4sZ{t$Rl_a@Hu)~1m6^Js z86(5}29Z4vUrox>hpSiLbXa*N$Lz**ye*P8kJf*=fw|oDOg;+ti-h8SEC!)9CO`O% zsC~DqkMm$bsz;9K*_G87DjL;B^X00(|J*!A7?R8qZ%5W095VL|YYKzhz{FIylfV}b zo9XmjFkf|Mb`*(H+%iiNg!5>{uJ4=QuIcGD;^$}}LVR6q+m0*3%yS?1 zpgK^3AsW+d$-2Hh2{XBjWzv@AEHV~?kRKoSUPjuQq5H5oJa~C|hQWI>!BoKOj?j=M zY8D^PEX*(}e}JG}jI3;LPCbf;!)NPK4;}wtj59m#3F0vh-LN@Gibru7lFllpjS<&o z*72WUm38Svzkh$J1*=KHA>Msvdt4*et0e2FLruZeVF0^z-+ejE+qVe6KMw0-01YHQ z^NWYw&O-;JJvWv+8Kxgb9ge3W`w&ObaXCf#=vJ67q1jEr6$YpVnitfRBJn;sL9RI8 z@YLyru}W@Gsi&8p5GY_thGp@NCLbs``-O|lX+pd|hHy;li_XPotwIFnc&783j|_Iq zk{8RKl5aDyLAGp{ZreV1pRN4NfVWf*bqG9RSN9g5Q{c;(eU)4H{>GJVBQhfazw87SYN>|cVe$)y-Wp+#b>}1MbjEO*9&?AN2 
zm#W=MCWs;Ehz~@;;0-Vk19ri&f=poVjQ!}FgJEl0G$~}r@4n)>&|S{>d!DZRVa^AZ zeW4mAv)1yBC5r>1?j5c-cIqH%Id?eV@*6_&$NKH*PMvYkI=BxW^?d;t?*BVQ%D+*) Y_RQ9uGBt?7-b4i`%BsoKNSTHG5C2W0mH+?% literal 0 HcmV?d00001 diff --git a/web/src/app/public/favicon-32.png b/web/src/app/public/favicon-32.png new file mode 100644 index 0000000000000000000000000000000000000000..66ef1656bdfaf942042d564612d63b647c4f626f GIT binary patch literal 1641 zcmV-v2A27WP)9Xs-)CsYs=hM9>=RE)ad7l4s zp69^VJjB-ldU+zgtVimrFL%Xr=Km7-JMnn1-{jA2S>yYwh9p0%a~4iY;2R6#o2M&g zerEEo8pTuHDf`65fX_|D=a)O;oy|RFETkc*p+ZwRA%Uj(@%_K;HJ3Izg0qHZZ^YY} zOU!xUn@Y4DJZ!tAg4Z_1Z@X$p?s*_>@M6T{TLnP?!4Xq;hza7ev@L7wUpXvC<|J@V z!lRXL=2$?t*j;uKpdk@YC}?&B)c}^JPYU)QPugbyx)vnFO6|UUU&dH{{gCATnBqX0 z$F_cxKB@V2m0CZJz@qu_j`q(3&G`xBS*BrIRPkoWdu>TxUf<{lW+*|m^3~j9zzI{+ zW$FHXW$Lx><>lIYGG~ zs4E~n1vo%K_yqF;$~>shf?Ea+904BCUakmx{OUj9r7)`D&4_EkrW4)wVSL?8$#$`Z4QNUFNgZ7AGh7yD|Y>6tO9(RriTnTnPR_T(m zG!+^G8VtY$Ea2XlOIm2w<|H8v4_7I+M-{i%IAg#KU}uF#b5YXh2=2MWp-jUS1wn%= z=nQIhRw&*Ic`H8Xwg<*t#nTqV*H2Tdoa=}c5iO#Q&~#e*6$S6M4H}PLJ1pr8YMw1u zY|I#B&=^v%=)Bt`Fd(63r~p!OIuu|~XlA>uFpjdzU9OJGNZVqeHJKkc%| z=vV$X0!#Bot9L%$^BjS$t0DHrA9>)tzccIIzrguSRs(4gZ|~*WpKjoZ{jbyT#KYiu zfUo?;1&JeQ{i6LT2{b0+GwNL-Mks4dUE9rUe{wSi5B`H^c0EgN>skQ9&5PLAafsu` zkMW1!@1bf*3*ON1h^>}_U{G*nNdy$cYt>k_%Ya2TlVR1W)uht{$WWQjEnmR(JhpA$ z!Rj?PQaE-JVOe8hy|^G2j=R{VBH)2#=}|Hfg8tX{@!+ews0l?lojFJUcW(e-`0yuO zT|0wd(|ne#ypB(n--56LBU>E-(>*b}BmyR6vQm%HYVOec1b_Yr;k(vT*tDJEsWS+} zz^Ew4zGfX&YnC(g!N=r}97Pz$=)(~5{GwG-7Hn*or&|Y&ahHyl8$w3HU%Dilz*A9` ze(q>@tfVaX(9$1A^!WKwz`?HxM@^k9NrV7h0sX>WVTmz{Nm#?ZF}IKl5IK-KtV&A5FP!3K}ZTv?uIgj4w-|x3{nN^vu|~*{)cAsS-#aW56l% z!f}fNZWuC{u6#G8ERV;dYSXDMd*@_!jZ(yv?dd<1tgmiZT##2pwV)+0d8bU1m3~N? 
zi;`P&5+!_MzpPSwkEiS(O=Z(4Rnh65zE|I?ubP)|WYcsdSeExYsg9uL!L&i8C-`U3 z{xv$^v?Edk)FoW}8cnr9zd#$@>t@}8?v%hEW?|z&;@P&N7 zMSY%>t*xmJDi|!j-aF!nq^CYd%3o#S?zEyxgNX_+30$W>2Z|5(;9n!cMW^R9mvm?{ z3Vu-TV&Gio|GDzlxQcnTIbC{7n_;l$zaJGC?J71oGTAwRMZ>_!c}2MbafC)-Q#_Gu z`Yidng*!FeJ2hqaMoQxXQuy2hA_4+M&VDX@jD;n6#mM7aW%9sgM_dOQYip_(0k_2y z$>t-hdC3Ymb0MjIF6Y&4d3s&ecO_v}nq$uU@?o$~=WBd@m$$>5#kxV4~^o;uV## zg{S6q?d)a;Mx04{BDN_6|6uyL_+?>X=8P6YM7kS|-~&VsmHv0sFQnK!i==dV)+3#1i1Smv@gB3NLAr#LmIQNSM~ zmi0mTNW;TUKa>wx_`yjz%@9vv`}pqe&u(ZlOi62gSnkfsD85o#Q+-Z6k!PVxHO21gI^)E*|?muR3(3lEigBm=Tt!JQVWtm11y zVM5F*Dm;i;#b6K0tZ%XkUx7v~>@cv)2)m5XuGs}V>j;Y#G`ny^P`K`#tY&(fZ^(|o zhXLV%a+g*G#TL%aYTgQ4o~R4pDQMI%vm#GgF*%+{zPe8UPN=DV=YotL_sv#A{s7V@ zSHZ%F$HJ&E%7cj+&A6PR$SMK?efgjiexFmY*$8ijEpLV`tqMCnr}VToLscP}HNeu4 zWp1g507U{nY;qW66{~{6gp9%}cs*>HTjFhhKao6opS8f)n(7%L;lUp_1}L`&?x#V4 zP{I6i3J!!2G!PK~Z(8}uOj`LM1%M@C%cCV8x)9^}%Vnj;@JfXTW#WT0IC96fTKLc_nRG@LQZ0OsQEWI!WKsW zZtnV-)8*7-E<7F#sb29p%T&F+0@PQ+|AGDm6)m+nV zFhaM}4+#8wiHC)U6N%*ZZUEHQR9AcOr{NyuKWGk6V%cxy4e(Tv$I_4`W)){<4~_D> z$xjtHpsR9PV0Fl{AZl3>79MDF2+Q7lWq<`mmQ9W{285q>TferZdW5TZdqP&1&uI)$ zY(WlSlbao3Vbl@?W;gpA>1u*O@pJI~;Rgp;5Vc%2B+vB0E-yzcj_mDje~~tnTg6Tz zBm>fI{U!}BMm>VUym%tHwcEiBu8iGKZn^B?CWqVs#CZke75sgLM@GS^InC4#y$3}9 zB?6<|eJ4}~_)W3Lb5Xk|Ia(DIwmC39+euwffw8XQ_Yq54!})2At2#gX zX_?1nM^>&&B){I{bE+z1Eh){lA3B69J;l&M1ttA7`z9^iJ;?s)ODwc;V%fiAKSMgM_WpTt3fXVSha&5o5!eeWyCwp*hMsZ%TWz^Wb z;uu#^=_yJC0-|u0P_Nt4N>7+a~cDb*-j;130wZT)K5?*WHdE-MXQ44 zAxl~d4>txd(lY?@fDjQlA+N}&P9E81gr|xuEn1k_;_Ly?0E;7*c|O|{fL8;;tau{1 zY`@y#g-Xo;BZ031=K|vt3@*HH1-1hVfu8`a7FL~+(_{Xw#q2qKSHbK-F7;aYUbDk! 
zckf3v8ez~rtgJKG7t~ap?rD2SxFQ16V|f}4KUyMO5SlMX9J#4~`m800c%9Tly@0l~O2U$Y!#L2&q(xbUKaadAP2N zG6uRRiDJ*`I>s>U=%cyr8`o1-Ue1^?)d1wT?dX*RcY|<39<1Vo{?r=>Lx7dV){o`A zVIY|dUMj_qsw#?#idpdd3w-?XR%YFK2Sr6ic+Dv!lkEbSreoPE0IH8ao_p`RpAjQR zGVz>A?D*sp08-0Vp#r^cJr5j{SG*gP>H!4+P|iMLsoU60_QQ`Ee)0+2bKiZOSUZ*z zYsZpIen92WqiA0IEH;}%IRz`wiuPqI$$YSZZ{K!1yEBW?qrb$sQ%)hheiNBQvTw^& ze)}01bvVM{KC`$_?)wSFMMa!{{slfL`8+pH|0V|5e)mHtv-dW1QIyR$&!i|2ps1)A z5n;%XAw(llHedg@*e({++nx&7VnYX-1Y1*D`UXL3+TK{smfPz-L z5x2RudpJ{yj@9efbj3|HKmP|hR=r2#Zx)ake=+HG$sWVsrzL7Z3$X!p0R`Jr+HZ6B zl~CwNn8x|f(Yok0V%J=b&17i!-hyJdyk`EUZ~jBzzW(Km#xOS74igj|Loo z0_t~da4esE28f_t!HSSz;OTfGnI7@rhOrf*$H|>VnT;F$z z4)EOymu*I#OeB)i_S1C_tyERU-dAv5JSZb4Wp%d`R0XE~cFbi}NT}2PsT=%RpMdy( z9iUainxN&}tRX19oA9G09;-w0NdUgKtG*$HdBhI)zzngT?7 z=v-7<#kbn_o?21(vrDroT(&u~TEi9bMDm~pu?IYRud8qHsw!i@ZPT2#B4o#$oi)4v zTLXY{3uk6EA36T`i-SJ_I-}KLM3+$-1Jr5Z-a#JQjJ#st{CFbSaR@TlftOwN4Q^Fs z?3t8i(vpZBby8uBWw&z0qPQTfiJ({&5&{A#C7hP?!)Qjq2SMSUa+g-kD+(?IvUT+h zhopQyY*HA#zb7Efo|x54Z#OvB&-}c?K*iQBgjwnN*KV!azxEcmL%}~Ctl1wi0J>(2 z9?UT?{IZl`d{*&9sYk-;v9-1fSO(0BCz9(1qJhE@0-;!7s)ZXhoGDOTVB{8H74S;o mm5e8nyZA!BkS}B)(mvH#^i_Vats&(G-zgg`m$-AMvO5?SP&gTW?8lat9&Hqj;rlXEgA=Olv2 zQ5cCFEs;#lK_FoM-(S!4-OOz7-tOMa?w+I( ze{i3bU!Sed?0x$5X@m6j>yW-Te-F&oXV^S_4256#|2+ph2RsKn2RsKn2RsKn2RsKn z2RsKn2U?W_Bil#yiReE#{x9?!s2|iHnj4x6niHBG`Ze@F(2pVWyH@3!ueawwZ{t9O z+1WVG0WA*g2%Q9-3tbIOf+j=HL$5;9px2<6pl6`RpgW-}pi`iIp!K0~kaWd=K|g|e z8*6-dJqKn<4hV}8Ci`++7djn!3i<^40SbCZ-|_p8AnAzPpyQy;ps~=ep#KNWlDzem z@*HSZ4hVA*$$sA(dK~&1>M7nU`Mq?)d(b1$snGABkeQ?6#^n=hB=KyT)N#TQ5Q z_gg}bLc(Ji+;(uS9n!JfeKlYEmgi1~u7&o1=7BU;@VcNk3!5){({R9IiKA?8^|yCJ zGoTWfj$yuSfuJyQ`Cw#W(L8m4Eywqi>EF=x(00&p$oLbRhMB$~o&&vz1K>yf=uezt8l0*(LHbo&hZn z{T%W(K`)X$U)08Nz+#1?=IRcG-hfhg*Js<;&l40ztOn)}4m$c>6^!cpa4_oso`R-C z$EVEqh2d*Ozi~aQN8gq6H-1LhZ-q92GzaPRK;uNqzq!dd5aC08{&LVAkjB^!?9<=3 zjis;OC}^MSRQmZzDa`+q&vBX(bj)!p_iy5Omh1VMf2T3a{m`b6*8@#1hrW3AcauaKBQQhg`m42`O#C@r_V1e60{B4kN*448gZY}0sU^o 
zC%6Ldk>4==zDhsqw_v_)KFT*PoM&iT*w`mszKOCrM!o^X5e$Hg4y+H?{aZW-8qa}9 z<~2vASlj2I6c$756o#*d5C39t|0E>8UL4D{o=-7$gvJuZ?@gB@{aKi23^IIOe2x<+ z+xhqxuZFH?&f!WvpJTo^zJ|h*Jbw%n>58H{niz(OlwlFb>ww0Kr7uCw9I$fEQGULy zp*Nuv4#5la_icmrq0hf3>ho*Gy6bo75ylXOVH-x+OuxU>=MLiy~R*w0{p#ESIsS423kq+C`fFb6ru9YNdRUC?V| zd7pD!p!lJkq5p&$);|8lo&$MtKynRfz3tJEBc2D05x>*X@0uKOFWd^3|KvT_(w~nn zg~ym)kj=lai6yeXt$rNBd$9@H=RP5Nr_=YK^-I!=TC?oO19=Iee{NklU}c%3uzw7c z^6{%b922w+*}=@w)sC@rA0PiL+|v)welIZtXY#Hc>CaaI*ZA*>Y!KV$+hYfP_ip@t zve6sWW8an|D#M)rF#dxQc7WD1X+5LjpS%vJtEBpuWz7LA!yFa&ay;}E6vLeQ*0GF{ z51@avv3nLTSsm%`XTL9KA8-t~TZ8w>@2CFP*$2nUrgBm~))2_Ymz`eHJmj6|pg-_@ z`3I%{;yf{NPv`U9+>qA+Sqr6qUfnnl$*;wJYOSuYzYw@Q8f-ldsejB0^Lmb+HyOMA z0&u^6gtbaAAID-lF+*GN9F;HQvJt=3Ht0ZjFuzL&%!GHBKm)xFsGGR@7iGwS81_%# zJcWI5NWUr{T()z27Nhj}_~=7?F)v?HUmwT4JqQ2v)1&%6N;&h;Pfx&uW8uXjt}<#Z z(cdAj12U9L|EPR95aHhX`jk7zf&Il9ho6ygpW21}jyZ0l-`)aDjdt~|d}r}p!IANT z<|0S+eIzFx@K58D7&&q&uLJTGP5<7t`MV>>RKZ+WZ=;hwJ^S zwe?Hs0*%k-iF_1~=}4(h>IOyv)qg*KY5Z(E-dAv=2A}{3{mG+8S9N@e9s*IptrelpP=);Ku^Bi>Lo( zjvR=vubAFlAjP*M$Cl(rU(DFK`CqI-uW` z;oPL#4!8`S`ZWVN%CUcR^&CJZWtR&3%Fp)!6vLdbzX-VbTaNwa-*d*ktxr4eStjH8 zz_bk^9l#i7#Hub`rg?!2AwM@*y_ou6ykKF( z$}X(L$`LP9PtVZ-#3_#I%sZ0CZx)}W1FkAnFXcr!7y3UahZgmZ^c<*+1Cqap7UTFL z6vLeC<*|&J4+r}_jh%C03XCYPkIC(om9H;e{_^Kq*OO1ZY+b3JVg;4Qw0JD=4ZJ=A z`fsSR#`1T04rIsy$z4QaI6ef$u&;AsdI#;#yjf1MJ@mg}PA~LK(Yw=S$@;T&8?j6M z&qW7>>k0Dm3AU5VbmYjTT{_?k>b)=I*9>JS*Zxu6a{!sM*q1;0Ca4I5Z9{ef`^wRk zL);AUIA%Sab4{FA5ixV=kM?w z=phFrXAvo1->J~|Pz?WC$F~W%uz5M-vYf^FlV(g2#_<>7Ay$rfnesbV?%{m;{y4AD z0pvQ>{DVUWyaoT)fK0peuzCH@JqOBiAd)lLoLZ||G{;{Uu?FMvTfjsfb}{Sr$+;88 z%QhbS;rgJZ@YP+HQ+?w)Kzi>e<5O|yfM;ooC8M?}+kXC{=RmC-h~(=>9OdiRet5C| zR&!%Zn;4e7uy6c*?s>6TIpSri$2n^RGFcxS$GYoWjK!hJHU)cgE&n-jY0Qp13X$A6Pd?u7o82 zJIkZ^pgAC~14>G=zfdg)tUPfX0^I?n&!E3SMgBhZM|tM&Fm`{B zb9U#c2N?S(Kj7?lp#x>7=Y@UyT*U|7P%1v?W7=ptNb#5@+tgq19H@i?k|9WW`W4ro z%G;+ou=R45`^k*2_oT0DHoHHiPgp0c++z9oJ}mCzNBKf8H0vrHyqQj0EeH8=KqW%% 
z?{RX#$`8lop=nUe?w7xBd@Lt!TsE^hR}9>h$ar1$@s8CkUe8L;TW1Dw+BzbwZAD>obmL-#^4e8}H7f0Vx`PkUE=KY6o@$GNex#LLylbJ;@c zFb>lGWO?!=j(^@md}Z6e!bFn3;zq_m%9>7<~v^^R zU0uevjQvf#PjABiLOI4iruC!iGdyE21&7V_18sRW^mEAD0coN4pA82h+0eZ4{*cy~ zC-C1kcxU`gdF8-N%QO8yd3Sq|CV?7182XV@!7^MzG=O%_88P$ zu9tYvfdmI66A`V-QDep!_8Ie!SRL8EqfULlcM=x!*6FZFBM|ML6>^lMKLS9}OMK>PF+ z`L<(ajF-1j=fnUHJhYDbF^>84x#FcyGUGLeFW_3*w=dM%Lhoze$pOg%^i$|4NcrXy z*fM#1a>?P9mfQ4a&2MYp+~eubR*cFdJy7J|N|ZNU{`^0y?^iC6iA{rl@lE432d^}) z+XvG8PNxj`lh)*bWFVr|IKBhLuq3%&zYcSKY22qjGsn%mdi#JQO?gHZ#zke6F0lHb z5jr8XNfh@J&WWI}a*KyZ^Q61a;@KT$+~%;IrqRwmUqEX@K2hrk`~5iHnZ&;Ae)hg? z*nC!6zJ)F3)mi^4thLX5oax`R{y=faBgr$YeR{(F&guot9oTc}2TO4sSIN2koqZNZ z=0XZ1Rx|4!^Und6=0&BSwMO6^%IcgGypi_SJWoO%{D;=zfZ86C9jF*FtsMm;mPRAn z%59a`n%+ufIDKEF2W*Uu^a1^xa^-4o4(-vSc-Xd~+nP9~!tgc2v4i3Wwa=#Xg!bG8 zzhMqB?FTNsp}n{*@mc$X%kQ9ju{RX=p#8%O>?pi!#;w85#$ii!I|E2V6z_|5Ebc|5}3sYU_x$=lB&A!@t({u-|n< z{QdRDMJv6Jxl|KREnh~Wy}#u1){wUWS^)mGojHz%rb01!!0yMV)jac9t%T3K>l)6X z6h^EKH1cnG%rwtjepYRP4?}sBhi>h%0TlbO zbR-|G36NR5)=}FkA58JuKIX^jlfybIS@zGVO+RUUz}5J8mrUCL>I)Pb{AV0j%kC_yUzHWPs$w|IHd4P+LOUw`)IWCe($0V%+2tnlc^jaYYmNy2 z%K1_h{`v(l!-em*4h|G+V{P@4#$$<#_>ibWJy2kt_SKShR zzOUtMewSWiKG40#l5Bt@Aa4WoLip2ecHF-N=c#;t+OJkI{>`?w*Cn6+yUKlfwepHJ z!h?5p*#OEHs5tRnh=A5Ea)f{N{bxdnzTf2VpW`M!?)UvaG9aCyn4oKj30fRENvsd@ zet=%6Mk4I1|6d$(^!*1$?Otnt;eVHZpImPq%LdrZWdl5m3>73({;zj%Ky3x7?>`Mn z_5I6|%i1EgWUr%=tudBVO&rqr)^KVTEW-?j_K4WZcje&zMjzSNib zzTfx#73x3jy}UpDN5Ti7{$wJg{4q_4fYvUO+64L;bS0GN`@=PU%InpN{h62*MLI>4?C-Vpp3id7sgNkJUKj47FW3tpKS2rVLvNKy2Z_QS5wFNW> z^bizlGwn&QJ^VcOz3!{l2C{1c*fTNd2T)AeD5%+B(6Z%GTR_U`{TUSDUvqx!-^;oV zpWmZe+4uLGK12C|*#A8>2GBmSTR<&~w`Qui+5yrU?Nojr=KO|kMvf=;z^vHj^K$Qb z6_)|c100yfzxo%+nq;n-pwN>5s@;jdB`+l$cdZ7Q6AM66=0kmIG zT((9{;`9GPEs42is|0R5>1y58!|_{*@QV=LBe=CV>Cy`$dY! 
z`VflnFWY}ScH6Dq_V-w?66-E~IhDTTG-PW`tbZ}GruF4U1{(;2mVLG5J;&W4>Eoz< zw5P`wwEN>3w7p9n{CBN2rm%T|#vxw`AHWsJoc57#SqwI^;^KS6|BE>$baVTFqrm8_A*nt3L z0j-COCDsCHkFfqw{e|DZ(Tuy`{Zi0JP=tSMf8zZ|&3<3lZXN>}VW8E=Jmdx*x(zbc z7Ri~Bw`s`TFvz2^&KwZWBifSVOsEt8%HQbs^=NgxW?g^g00!@gj3xU2cab~4H%Oha zuh&J`*LuH`y09PWfc61@1^<5R-&#Jptnr?H9&vyJCxHKi|4%Uh%S#shUwt_soLoF|7~XfHs9~{q@VM@b<5AxprikV$XOyM zxbVLY)Z%EYfAz(4Xgf$bnIrs6W~`|J44KZCYkJ=sa^zy4<+vft_4Esy^D z`tn8izZn10yq&U!+o5AEqJ`X2|iJ|}V1TR#i`7y16*iOBkX8R1`hdtVJj_L$cE zX>ZTw&G#WZjGgPLV80!v1dWs;SMS1msqdr*bZyELUl?1U8TwU~ zLG@aEOxmj1wvnt16BkxuG z-|;T|`}n`i5nld*5&jDt-$>x!?C;w|esZucpWWFfzGKGvm0+aOd!z%VKKVtk@`BIR zbzCj%Z@St`!E;l-R1s>@Y0?8AIG`c3lJhnOp9XH`DKY#pjiw@&fCx zX37Ea8!E>CpuL-_FA-LD-|}C<=bvi4U)tvX{`>EXaHQ{Me)k>Yh2m>gN9dVXUHm~X z|6KH=b?~#RKEGf2B{ROS75nkBYfNy%KRz^m5k0$hY}VtqQ`kr9An>19|MNa_IG@LV zrtt3eVG;h@Ilc*X+5ha*&@6NRlILZ|JQGZv{Dq)Wgm2;CJMjI*7hjm8#YOz6>+iky zzPZQs4B`LFFTM-*K*!`AAE5fk){y_vRhD?Ut2vhr(EO(64eFu8YS%G#{cmFZzvNH- zP`5JcZ^(`V;y2U*#rFAS{6Fwd;D2gU^dpkx6ApXZ`1@RV5VogGnF_w&D~Tsv`0TUK zg4^%7Blzf}kDT}E%f}x0Jdhoo7k>2I1qMtBuKoLmysfkyVKeDbOMAFm*TgC&ptuYMd5zo8=hw+}qLDe$kpQuFkh2Xo;?n0@e}hl2Ai zxS(X;Z*il0Z@c~W;N(+I4Zi*MTc;hNIH7&EpJwLR^T3blqvwD9Rrzs@Epz%fbLMx! 
zS6_YQEW7R%k3M|=+hE)EU&-B$shK}!4}eY5_1KT_iHi^$LGa3}uLi5GzGiUeU3WR}*O!xS|HSmsS=#`5 z-clV;n|XY_Cyi}={^px+f*CVrIIr3J-+KG);F)KhEvfJH>__ka%=iMbu0ze3?_5Kv z{C^KJzq~g1PvrlZj4bx`_^+>0YG(XPb{8HzHF)^m&m_`Cn9z6Uo_BsQdfu_Yq)GSc zW6?bpGdk+;jW^#6mRWwqV6!c@3O@bxQ}bQ&o*x3m2mWrQ7c;j33i?R$l;sx3HJ|;^05w|GyAf^m_o;Ss#_cf7`%A!GB}d{tEwE z*OQiQE8o}OaAVLhX5L`f$hKhT-F6Suf9lV5zx~bG=bRf9I!2q{?7h!^^u;ra?$%t+4~=S5Nxs4Ho?93-KWda(FXO9$I+ zzXNuR+dh(=aqw=_Gq+=U#uM;A=jA4^dW0D z-sk44-M)I^cJF=nGdLd6))CCNz(T=w*WVy$n7=q)EcTyy_Ss;?l~*#P-Aacp+$gVg_@e(b+_SJIBLWxM*CYpKJ6!QJ;vDk_hwF0Z`uYOvUn zO9kzt=LxR5`WokN^yPgMKMfWf*i7?*+W+fJ`u;@j4?d6jlgEGkby39g;`kr(Gxto- z_iPSHc+pxIt?koa90U6N?z`^>tKi2Ij;Vt=F1hrw!8O-j7kv5USGuM1PrBl<#~%+i z*zk9q_v<%f@Ef@BCX9aZ`FFwQYrdRG->JF?gDyXY3Mso~`yF=2uw1>ut9)hMCXkzl}ED#5t#_dT1`-x68lSL~{X({X05s`+rA!Y!=C4{Xx*eZ;W^@ z{J#zvEQR}hEARj0hUoy!ldZY<^TE6S{sw7J_L-9Z$&;T97Fc+ZKz+Y0x6bFCf5G4n zJM0*of5C;pO*h{hT>g(Mg8dFSFj#Dfr4rcJZ*>2eXPvFbCG?B%ruD>HkJI+cp2t9| zE~_s59P9Ypb$R*amxCq8FB1#}|7)$YUhw8yZ#m1Pa_R5$&p!`#+<6yMpAphy^UaSh z@XnIIIr_jiGtpz&^atAicQ4v3;rmza?-ijIMq*>CE}ldEAkDva;=eF-GqB&dJwG*G z-g51iO+W0y{|z_Z6ts;>8wiEHIsT_}iJ_ zMs?YG-B-+gu`AL5)3 z)K70^t>nCOHg+u6r8_2@{bWF+ibUeFzMcVgAecve);8>!N(te96T|3a>oLKw$oJP(Zd?A!j>7+@n{N?(LI0{h`+Obs zcg2P8n;bip%ePxsX#9Wv-`+LPOupj>V|(nd)6QnhlCFnvEj^%e>gXE7*xzWJQ)(XI z8`cEu@Vi$t@n!Uk7n=LK)a)yj!vEi?hh(sYk=U53i|3HyF3yCy`XICSSL62m7XIab zi{r(f3rNQtcYG%O|5n>^7h+4TSN|6fYJLH(uQA=_SM5XKYykGDky z=h6=+GLA^UL+{hNz~+bpVD7It?}zGr2-Fw=wD?=a_lWl87?VNrf6sLWKEUi^V5;dm zH2&Xn>wlGu`#=4RGc&>euDkDPV(#O(wD>>gq<1nI|96Z3ZMW+&{-rRWZPg!N2P|0^%E3*MU zEBUS@zgCuQ-+ccA51P6Da%1AIy4X4uA1}Q45-0w@{r3A{@9kIzoB{sj{}cYw^{{n1 z;Glzx{C|~{J${eyzsDce*t#-^w)!BmRq6bF@W@j{OgZA@@NtMg;VV#lGPeO z;s4M*UJt$}rpRL69`*85{&rf;^`Om+z83y9H>f}RYELCoR+ZtT-60eW}%NCV$x) zYpoN!>&O`-yuAGUSK-<@%F$Ci2zO>ILtbx-NqbaXZn^cgMEp-z8`!q!{wO{ib@X5L zxT1H+|EE0z8jb(<0e?+nU!PBej1Ag21hn>>BmVyv$LpX@{Euu~kolUwBma%@0chNO z`Z1-~%qYgUtGu?{l^xC4|L`MA*vnv2F>|ax>M8d1d(}fZzTbJ<9sm2{OD~zVd_5V< 
z=(iVKbg>ivGa1Y5y4h=)jAd%)hjM=HM0+L1{_;0%1GP2)8(e$wKceF~CNR}L_puGb zzwI}}J${n5vjqL+S6&I0UUvDaZD(OcYX@AmzxrkMmxt~BMrQq`{Qp|}ul))2XZwB~ z^`|+&Eqgczs5Joh-dntOShmckA7?j~sSW>8+~0^b()j-XdGo%%=E48ww8zXY9Y7w? z!Hx3)3O9;defFv1_#9zJc|VUl>gcNRufBixJ^$#;7a+X6^VT=PT1z~iY5ou4=EBp9 zV*soU=!q?(vCMKSu2ga@vwSGZpWL`SAdLO{T|>@~ao|4@_owmZ0I0zT@UK@JKubWH zkBMvmtpixw@86ZvRF%B*=1+pL{j(X%)Ykty z_xq8IDemv5kOyFcIiNOxlz(+96yYCTU04*|eSO2m02({5zsw8Hb-)&{*IswM@#PQ4 zuJ00Jl8#A7hIUUq3%4udfvSa&;P~PzyHOw$^6tOQKzS~dVrtbd?y%D?!5L?s6}&`_ zopSsC$>*=Wc?`Bjt$M1G-;H5ko{P^cXUohqIf>-YFJ;T9ZG?U03DB6qHSQO0#H;;x zYIOcj<@-4hSt_#2&ZZu-L5&xFUjoAcwFNW;dJc;4kIrseh&X_MfQ!b&0SH&Z|H90n zh50~>?PseC)aU6>m{5$6#fHAK_oR=ycK=oBnEPTSoc+&pZEuV3Ea^ z=(JyKdkFWkX*8BO;>e@So@TDP={wCgE>UP?-VbzX--k>tnH2st$FwzM`Wz3x=HNio z9{=xIz>`dK(pj?uLlU+DRj^x-|dN4o6r zeclMP*H|fEfaW$IdiY`NnB9U!msrx|GE;vqdw+wCeivMQ%{67mGK!hlYuo(yGA+*w z#r$4D|FN)=CRQ%tT&#hW*W;QLY-4n+8uPTm!Ytp#KZFzk2e@<;1}k8(04jIO=p z^TCsk_FM-D`_F@Y#rf9@`>LDpuRVINzr6e&pempI70*8R9QzjDVfHwC^s&c+4@>M{ zYIUA;iuM;>fcR;>s~kV`y@u`2J`$;MzjVOO(EmUlc}>OvwF$H!^bQp1YsLVD#Tq&f z7?)FFb+vKNF|YkG`*+B1R(d>OF%duNeyzoM;O2q^K>c8|o_Oot#`pGYYRTg@#A@#ELGrl6T&)>SpP-5(_dB43OZ~HeH_EnsyUH+Hj zolu1T@Yp`!FBy*kYWKAXKZ}i+YI5|x{YDs9FI<<>1@@Zop`7B{i$k%6_3Zb%uo3pj zdbj2^rabY5id9wzSbUc|>KW5s{W{oe^_Lo2udtr7{V&7zUl18e`2E$djFX)CzvknB z+6DR%bQl!Fzs3NYWIF~R7i>7DsLiivZQFI}!_PbAU9)D-?yD^=}p*06PlBZXD_G$mI zuD@_xBdm8(5PS#jUw+}M;I9Y2Y4%EMp865*Ki8?qP1l|#ksQ6ou^nnIf%m0Ja6s)7 zk@jSa;a}^4);DvyJ?XEl+_w$cnPQAX7JuS-x|!{cHKuNr;vPz8VA|2P*c=ZdH&cxD&_Z+ z99;zY^*#x-HQ$E_`*sYVHQH*s2&c*ka%lz_)ta!Ac;@J?XLH^*bZhnGPHq}t>%;Z<}8ur z7e~cP{uq+}RK7ILpNf9&AbtNcNU=OWft26h^y!t>GP+-}fR95FMs?JBfGqX~Mo#9q z*|8T4dB=vYg^p3)A^8Mq_X{*dU)V!CM7ls27uGG_o2subsvqSX{6`}vNx2!Ncz>-g ztyBbC>OS!WlHXl;o(mcctpRNT?FXF&T?#2j*B#I#NaOPd9CRPw-38qU{T(_P+7Z(8 zMnJ!W%IX95mF)vb&nQ+ShJWP+Tn_BskpVUzH~oFFoyeGAIBv9Rw{7$yO*T=J2-mJ|jC!8UM`n)cdt2_&9J`+q_+uJj+jXI%EHNQ_t7?N{U0J(xR?!8vo*ZREkAb#>E`!v!$JW=zaaHbI zSP+S~)li5b-l=vdMY0R-fp&m8Ao&bTJ8?|q1&*G9zrMZy1))6k2t(>K4l7lz=zHo* 
z9-_8^2bo+FUHx}=n6>i$IQrvqFux17=~Ce{nRitDfpkJ^=m6pV5$vrq%~}pKhGT6` z&uuu{2H0{YzxNndTh2s|WOr&TD*2KgsM;sh8*~Hp5^m(DYlDROtDxzSc;Mnq>bp3` z?Vga#qp}89va38Q%E?z4_Iu=aEAlFBZ}KW@F0F0Ij?lIyPS4ykas_a+2+!9%tbJ}- zeIOt0%g_nX;6iBst3De;-$Jpn;s+eE9rB|&dn*IA95rUVow_a0v!&PK?}(p=<(6+y zbIV=xgzDGhHbI2>sGpOL63)ZilX||bzx)Ytw-#?d&zq9^CT3W`qC8jvHQ--#6W!T2uB4bUUQ@(s>|Zr8hvkWEoX{NcR1&p;e(X zAmJ#5nYcXLbBjyKCUaq|l@u1V-mY!%Zs^n_k@vG{zblQ+CVScJ$-^9Lh&}aRBH6|| zf=6AGjmNmTqu+J>{w(I!58#;_ntqgW@vd@M8>O+q6VPgq>NQ97Io6((+vEnkns-W$ zdnU8tIx)+Wa!1PDHXk~019)>RJh>^-1=2(K3j01loo_IGk*P2H7>9MWxVO*N{-&pA zQueCrj@_-fPm$u}^3emry>#oX$Ps&P(pFt>jjk=(!AUtXea1yNMrd@b>{mx-r-kaab$Tt+GvkZ9)xrGyE?~a zwx6&^wQHqV)4j3ZVgDI@fW0mIKsf4>31^u#?%z9_2iT{$Ug|^t$UAzvuaxBU4s4qh zyDLwOuaUkiEJ0n?LAPxh**$B))5W+CU21e(8uzLv{)-W-Mq`O;_*p66%iYJkC-KhV zn)uO_V>Q$6vdy#5qoFNCo0G4peZbN5CD?#Q<}4pLnz7sYnZ3{C_v^mzryNch`;zZZ zh1ACgPt6K=^3l zBGp&z3+B;}if`?h^I~-Ky|lCX%)IfQZmalbD=!bw{~RCny=p5bc2q~j5=<(tCwb@w z?N43e&#vX1^2~mJsEua^KwcCnv`$p|V|4>-U=r-B6ve81@%qeFxk*y1$ri&4x zOv1g!!9yVVEi|?-fqT_WxF2K2@n-(I|An#qnZkS)dbbvgxqhRvMB_yKhoS7K4n_D; zePyf3HxRon8-RU6HJ{XDnZh<^PR_AUOzeH}cUOB(z3>MJCp|B#izKx+jypnyfLPmO8^_CywTH-V{pPKG3kO$_bI!w-J>907S^!ah4v%=pNm0LPMV}XYI z11N*V{vMpm7e-yv6ye^q*Y+mYx$)0;>g{;loBcfOTa>Rol&hk)anxV@kPWajvV||c z$2I_EKyHj3+pSEe+oIfO+5PK>SZcWa1$InsoP`|Nv30Qh$yj4Y7KUvAPd26w>l%Bj ztFN!5jd&Jzh2|SFvpwl6H1}Mrci=NRN%Ib+Wg(I?V!A^AWKkXD50FprH&8=t0KFGd zoWh<^EDuJieHh1S9y!cmAv;Iaj=~3u>z+j2+!vdRZ9y8yIq} zeMr}#C!f_HuR&XCO?4L7$5({UK|WMx-_O`LoX@KCUD5&U4`jv>MRvfu;BRB7GcG-E zJ;1w!ebKtmn@~*G%8r_svCWo7Z_S#xkLxYz0OsZ8Q?5t{D6V7&{o2W;)J^*FVz9X6;0J9A0ZZS>MZi zPZAf=J2l3rDW(ct&s?-(Tpe=u4DVV1GPtZ0{uPgOPqL1}{({UKlm9BSSdCiSpq{^z z4j?|4@u#D0QgRUSD3f&oU3E0`TErO?_cw7_PoK-?Ux9v~xtC1(y{__9hx2m3!v^Rt zJKE_BPw_TgzwSP(Etu=jd}2nrST?=MIv(`l)IXYg>#+{d9Pz}m zGSEKY2;Nt#ogsUOoUp7Zar85aK|2sq>`guJzbNm~Tz5jwLS5RpeXz6B#iEU8{4{eS*(RWwf;uB^`;8> znv}=1e1@N;8^>Uy?*pE+zBh}$O4xkV_z)e~m16m{F3bhSi?_W zA^)W8jWC}2Z?XBkEacw4H}9jyzh}^AJIB8<8MLxk>ZpDn|CGT#@dBA(Uv($XAbopj 
zUnSTWs`W%!*cy_tb9q4pWk^@T);t4ZsP!+ZR`t0BCeOZ$~IIFVursp6_vawks68gZZ(E;*x zY7Yg?yQ>e*N-nX-<##dTEsd|@?Q73@H|sXTbOSm zN+xw3pNH{vUX`&dMt+GAt4HsNV<3Kx?QhnAXU!Xp1Bm}{j|2VzCVv6tHV)9Uqj&s; zW6Bq(zKz_J#9u2`wRiZLvTq*~BUnM+9>s}fx85}CdNkYf)Spbo#ylHeyJBpVQ$sjU zk=6+*7n1f(%6!ih%D@^`#Tz^NU%s>W7Y?)^#3ealfN_AyC0=9;sZUm1P;NGW6if8{ zUvqTp0CG%dPt0(QliID>WGV0RprcLPh9eHNQ$~;##y;GEVYAwi)9!vj$dsTAQ}1rk#89z1CN=9z2nan&yrDEMLItMz&guKUuY{bU{7( zmZ{+FWZJR2Tz=~Ne{1wozU89MS}Wz=PjDLYCO>^%CV+Xao*$9c18xINhf=aGUD>MR z0r`n9rH}7EEHAZ825-q8LX4?>O{KpGD$-;(2jwt;`{zTc3|sQle6Wh2!p_l@|S z@X!21Nn1{~F1~LQ>#K1{yo^tT_MI7{#J|ttT)x0PUHE?=94-%;K00qDVBf=0F@l#u z+B<`C#YsF`knzBQk$sehPRxR*Sw2H{+JmL+v>1QdXvaNLa+75_{Iimntd<{|cVmrD zXD%`Iw;I(ix z#&l}W0&=W19g5FS+-v&(`1;)f{ocOZlSiM&d(2)@MR8}JfxC^My4V0#$wcq_3CGnT z%}2P$1I%B9V`$N1z9@Yffz@T|*G?}L;ZxDC1lQr@DJ zEdXE8qspzBZG2{T{A$#7trs~evLWNP8h!teKQKIsb-IIzWBj{Mbh1 z$~XIcr1faLj=lX{btA_sdu)V0TlEX%2xXt6FfZh+T=(AIc9oW&HpI_oa(O4l<83A; zEI*kgwca6fqK2U$oq11%V)Pnj14!Z6_j6Qi z()Q4sP|6OFt+Y10(!P&*^2+On*1%@o?F{z&p98+NKU4y9DY<5URg)(tPnnhtpgk0v zYZPSzG|UeWH#jJVbisc>>q5$9oZ?la12WH*mA5ST=eL$l!Pk#3NAW!lo4Z)1+50a) zAE1>z#VVnXit{JS53m)~P#eJN0b34^i$IDSPVJ#79k2mofjhh%kccO>?d*?@)iw0B z$_HTf6xUo%x(`c#j>Z6UUXimrm$d=zp`5cr7KRNysvHq1x46od;v@O;q%+N$ZtCT6 z;CawAGQgTZlasTWy^0j;u_JA$d}Mj*@3$Q6}F> zYv&lwQ&#-5Yr`V=SDqK**^%1{2^)a<0j(v-*S94bpyXV@&6L;d8`99uKlXCUWA(rS zoGWHH#!vBVz9>%6+W@u6s>gWtd{d0?4fy?*L{`iB{o=B$bB$})A@1XDbU>c+EF0id zuv288dKyKyC0ob%7R?0mt zFx)q&$Ub=-Z1oq`n(a?zib!jQ-h*N~fH@SCFE6Wo9Gh9LeA%+qC9<^@e;22$>~HA+ z?Fqk0^*TWDuH+P`yiZe^D=oMDlvkPfDdEel*y@Gp$;IoL}BQbyt^n)N&9&=ogdID zqewTkahw9hc&9m(_27Z_8LE&6xyD;$Ew`0CPxAV>k!S5`Z}!8W?`a#dt?@}~Eoeu- z8{v`NhawC4uB2RzzQqZsm0L2}gg&*u7%9_qKT8j6Y3skH^(Olr`;A)I;){K8D zS45f*NaYY>f7RUAQmN-5`bsMn?2m5)+K5%SIcB86ca7=6&ezH#B4 zv+M)gC(a7G)8d^v(a*^jC)-f_0TqU?0S2{yxcUyumoy&rnQLb6uyv8C6OG>JnI4c1 zc+BK?D6v;xRHk^JrgP~4*#YIo0>c04Cbw8ezw4s5>`oqKGu(szTP5N{=X>P`+$+LZ zGpYcKf5j{-msyN&vH{jG^=cg(K(bB_A@+U=bG}NZ)AAhWQ6$?&zQ>yWPr1glS9wRj 
zYmw0hd7fmaH|YcMz?K(XMhw34;425F_WTUj;wTSRDZIz|lRnpP*gF*4QtNrcJ;U{` z?(pThu5yy|jefVX^+C2Ql56|{{jQ1XnSOTI|4YA{y*4Yh0shJT`@7`jE^wusz0Hh& z=>hpFl+%*YA5(%)X}eQ;@*}hl zoM8Gt*_UCSTg|nIKXJXFbBoWoeAdE8`<(dmc@DYsI_8*&++J$pSTx?zo}b~EM|`mQ ztRl=SmPE4tB&7AGBCS17jVmpmLjIWC{p4v2{cS2=E%j6T-GqJe`- z`v~6sP{e!j3H+NqWmNZ3Tt70p)?K;yt;r3rVZ^t@dk^JV&5nPS@3&AYE>M1e6Ds6E zcRXsw>*_x)r>|a)?9(817Qkk)xjgB-cm5^ZnfsH^|9n8H^i8v&nL zVc75JYj-wsXZ(ubQT_+zxivmBv$u_MVap#Sj8sCBW#%i$s~_&;3C{anZPu9uy^a6A#v7h-d)EEg+38KZnxK;IrN} z#_&M>XNZ55y;@m82QUT?bAzI9-EEnAzuwJwg!saaIoT&x`<*Jzfb|2G7g~QDj^ipR zli~r%KU-YSd-+azdYiHPH)T@UqB1&cfOTfUxL~y>`6SDaA2l928`8c@{|^$6Ed7l0 z0<XdpXqm%Tf*#tv8=Ic3&S?VHrdk1w)UpiI8*y& zsh?7gQ}tnn_^drNG=^y#yqmGFrCYQIs>zjae3=@%+xP0-Iv3e8l|8DD|2HIgJ`PfT znIX_mpd#7UcMpWN4rPDM&d=aFaa$})iK|g{jl3d*vk^IbmfMid}u1fJ;Wt_*1!@~S@ zmGrr3OzJmJphu1|_EpHc^m|;N^=`|j_;upp2JKzBJ%YVeB0rey#*%VW5hHWHDJtLp zal9?rHuwSPgTirAEp3u+qegz#Sn+oH((P#Pl6K-`>c0|X`us}S&i!g%;X=9B20~hE zxd)^@S{{PlhQ5Gm)CqC9%;kJ0Wm8|JG5lrF?$CH>E=W4AV$8dE#_z=+@w6Q}5t6@v zc1RQQM%;kLLSf8RwK5mpuQ`UUJ%ZBjsPr>s!6#Drx;Tvij&{_^*gn6ct}fOz#dj)O zM4NC_{9;!<6cgB=7!EV`Y$SebeRn7m8s`}wm)ka!{9gyn1NBJO)u24XlJtSbfAc~s zLVH3dK(|7|+FOwHf_w?}&<*N)Wrx25Jq5{^aWu3AG!Bw2qcWTFXDyu3bN`d$8j#kY zXk5`HYv!E37~8vT@Lr7T<$spXquRE&a|5gJZ1wHwcU1BjyMVEQd_$ zeX=P!-=Q{E`)AG0OrNLnK^m{!1;xrQ8(*HldlGm|y(|6h_~#B}=co$tPZ{0w0Mc<9`_z@smfw{gZTUEE12B)gWy4gVZN$ zOfeK10gZ$T4$}9-p&^iTnSLkR<0p`w+0gpM<>rH>mS!I66xvg3 zFsju9k^$D~n)zL|d9`{V+C#G9SfHoyEgj-|&xO=aZ5n4o)19k)kYw%wD8j#vvH>>j zB{qQid}2gdi&7XNpKi%ESKq3B_oq>;84obG5$3C{ zc09oSZCEJ#=bpgmVZKK98h~evq8ei z-H^u3G5OOuGD!O>KX@1uRS70-Kcahy`(PcC)^C;Y1*P7H4B^k~sA|8Q$@D)vxSqc| z^=bP4#+H%eHqh5lj3=xMF#eEg_-^^1$1yC2;?~=SGR`;aLY;lS#+I_deh>W&YWjA_ z3|PJBLAG5u4uj5tlJO`;7HMDNC&PH5+I-R-{Vzn1tn9*QOx7j$!v8q%&wB5Q?Ox^q zSqIh?OVo*G|Kv1yuV)^mC3H54;Y=WH7=dv1y9=HL=naW7Eo`{DL>)7gSCm zt#@$RisARfCWdQ)D)l??Y~rm)78XuF_fhKc8>s1k&RHW(ym`2$ z)#}u|kJ7*7<>^lxkMcs7w)It3_4$8?7KY5+R}-_^7p(*bXgkTEY&Ok%UIHmjDkY0x z6q{7*IYN1?rcctn_yq7Lg!%l{m&EPJDr0~<sB+VMZOh3~ 
zcC+SZV|+*V5*rn+53C37$uY+oF~if6y4sXa@okE0+Y-{;cdy`e(=>V$7R0t8$2p)0 z(ECtK7IhA0kw?=fb*|~HMlQp#q}EcYzc+IP%3EY|2HlE|s79ygy>>ld(YWGIl)cx; zWK>q^-}Rw?LovQnH~C>qY=+jSR?DMu?W1SOJ|piGIcUOsT#JP5Tym^t@)@5(8QUP0 zxe2A&7pYbbs6C;dLh|7%zA+|?Iv3WkOYs9&mhUqyf08*U%6lX`GmJm3MXu2&r2x8vR=L#_Z)Uv~A0AQAly>viGbUL`V4q)-wG@ZGBSI_n16Q zwO~K&|4lvte1|Eyl^s2Ap?wT8+R1+iIk?1DY8$dWei+S@g!Qj% z&4%T5G`7blt5|)r_q)-ZWpS@K9>pkZ1sPkv*D%xQRnys z*w@nJ9IYvC*!D-Y9+7MMu+Tpp_a}^cld^6HsXwim2lf7;${LYkw2pvMIzT)R=LXvb z9T?6JD*ucb6DZ%E&3P9;hqsK?ls7^1r6!NM+4D7QKi-jQ)6UO|3waLOA2KnO9P7;z z|GoxtKm&;`A|BbA4Anw#Q$QJ;egsdk^}jto`zC=hUAI3;h~%16SyfHTXy>BYsGb?zhf^#=6HNIO&!9XAy|NutZb&*{0;G7_6wj?5kliB!+nTFWZYyKckKsGDi@jbJ^W10F zB0k3P0LbIMS4vHOg+?YMkMj49g>HsQ#0{@v`l0IMhAYK;`aaD8{8jRsCXH8?gz{rb z&QDg^A^F-ih2)D(#by-M86I?t^E987Bj!eEYp6e@cjjC(|M=d>0V^9E2SF!74%?Le z#>5RP?{95!!^>+z8D28Ems5i2@v&;9w^2dtcM)Yy6_s7%~2zH0IUn7O9h#ska&vOj@iy`knT z)TfoIO0W7}^??2ZS`az}x)YiXeFn+anc*OaS~^=d*pi>!@`A*BG0_Vf#wfn}=x`ltlG`G!B#g z)i`}Tq&8T~L9&0Ag%*ai*4WS6XV3=z5g`YVCo7AJ1-k;$TyfmijI91my? 
z+J)m`sVT+@EpjA1HxBCb8`GM^dXee0r;fMIzo*HZp(N)pj7E0X zaRAKQ@qosgH$d+30I}xUV=3JCvZvz#+5b0{vi}u7Q(58tU7iD;13l(IqywY}G#>a1 z`IC8fP90CvYz-L zlVhs3c|zv@$(b+z0`G~F=Kt4*>RWODHqQai0mA`d57J!K0?@UP=BVQsjrwe2huIU@ z_yziEALeSt1d9JVB6?2Z*_!uW4DzkvIp8_aAPxwB5%uM`AM_@a(gFI6Euj6#%=*@= z(E)ZZ_ELVo=zH`qOD`L5p5p-whjEzbD)tN0F%}h7{s~O zx4#3Wbbvlf4?x89Xpb4KXASqmur`6#w`<<-7S_LR7~P+E&J#Sh9qKc((0lTKG_q~{ zyFCZ$#Q}rMFh7@aN!<)N`~p@FXk0MA$rq?Rv`3kisL7cR%{Z;5ZXgb)Eux_=BOCLzL4_Y%%38~6%B`UkH>yfx2P|i=YZ#c=YZ#c=YZ#c=YZ#c z=Rkcp5Y+i^MxQ}t-$<m^-sjLGS?^Q`k<0G{GiKyW-EF9J9EAC3sgM* z#+Q1)G;_Ih4JL)xOBP^4c>PBuo*x#Udk%Hq5EZ{<0i)uVp%yym;f?Yp%G9bntCj(;aabAzH$H|}=dz=hPTz4>NTD<%Y22F}zcQ9x|HP;;sjQ`xh zzy$jo3`(%C`|E?!5K6Gu!N3Ij91!d7`m|*IiWod8dA*3i-CduM{CyFFQ@Q+)(e)IAQ_WK(!726^H9&XQQ|vElplRv)r4UebJ=H*y(!WnNPT*NcfF|jQonaxPY6I!^Q8p9aXrz=6*X^4Kx(+25P)gL{HyBvq~hNv z1gNLiClo(FA%H!-Ui|nziRb5VJq`ee_$7>tystaNH}U@7e%%4DME(4Ahk(Z{ghucu zjz8D?XZ7oG{prGAvtN(*AFlGpue+67VOdWL%aF?`10kKpKX$(+?`(y z3S4sCdASeQ%kf`su5#@s2`tzCy6#eZodr+>b$)Q1bk~4tplOcZ*{^iHYz4Vqwt`$Q zTY*El!M@CO)b$Cj3(*Xpih~Jm1fUcLTfKf4J-fyl?D1CzvSWX1Z^=YMl zZ&Xxw*UK=#sIW2&FxSh7Xm{6*jxNVQ)xW#z3n{#_T@m*M!LH3$C zhTnz13D3tTu1_#uhS$UM#N~*?vFnNATc(F)o}_0bif3COygp6W2PK~$Rxo_1VaE96 z^-vbV>!txxmqV!yuj^R}?&;o1dTDrF%rcygKf}BZZ#E% zu76Ve_)r$3>*;5k&l5V!=e%qxQ118By7|2-Q117tK)LJUTDj|LfwI>pb#t8-C~CDh rE2u!(>l830bn#zv?r8PI9h1^RnYbz$AC&qg`SH7y4>oxj=by?nm?`GtEyvDzR;_dKC&V5&H2sYyH4oRz0)SeE?%!q zQ8rc}eZ4+x!qX?k?l_}EKXzJ&E@`(60Bb^~t54CBUy8i!beeyRsGnWf7P5*B?9!*$ zr!T~cu(|&SWpZ(SZRJt%y2LuBRu=6h9AQBfP-I|3O0j4|U1LMMA;H+1*yz!HJbQI( zXzFlV@$HHvv6RIXts&kHn?(Dv#_s%5W`v~uBklw+z$QB>75$!wdqrE$FNj) zn?-y#QTx~QDaNK0QJ_UZeZ*#Y*mU+No*gNibY)wJPqg8?){z`))~#nIgg0X_^F)@gHDehKojQ-D9-kf3sM>?>z>>iQd+LlcGtAb+ifyN0C* z8|UVT3APF?7HqN{ry^?L?lR$mc1?%nSc->BWc5cO^VH6g;L=*7waz2r^X%}BPpNy3 z@6^SmW($o;1G-DWZRII0Zwny`>cWPX3T@8l(3IO-+O=75!N?>@;JTI&2lXk=?yz{P zz)+P`JX~V4G-A(ds%va-+QEPq+3d}X0(Q|_*8&rPBA^YZ2NJ*<5or&&Z)e!JQJ{>i zmuw?oXERov_yc5MC8dV`}hg)RhO}*S-~_B zS-Uds!MQ9nr1$bFocjkD<1(jP}9^tlNlk 
zX+y{u3aVq+#wOYD9w|$qfcHa&xg%1yt&S(I*-^mvfIbXd3w$@nE;a#Q5|PC_dIPvL ztqfQJj0|Y9Ttup~?CS}419@evh^*e(G`7|{1Y8H7$`5@4{7Q0IkbXm*ewk7od+pcj z!v`i6^kOL8eKcd9e2|`GA6@hBrR9&$gR$BD+Dln7buWrbirDM%=UM;g6DVyFH88It z_3leidwS~*j_qr${|+2F$lFuE91;2Hj@E*};lPVzPFPMCk){97z#j$t_<0kUAtHT) za(KOtuV04ORt`V7*BYZ-Vi`^fd*An;T>KwbGjrC_jQ`#>SOpPuX$ix>b^_<0dlr|^ zoy+c*o{w_3SW>}^9&2=gVV51@=(<#>2lxhPartsvKfclrwbn5Y2zQkEKbQV% zwB~+J{MXbrMSBMDjLRmZRLN$WVkyFVj^n+=yP#n8`nBldBCJS+rshq2>F8saFku2D zlNjUpVd6N{WkrVJ+ne%uz;YolBSW)?fc;(mzmNO(D4;Q*Z6WZ~j#_Dtbw782rblS4 zOY)JdETTRAwJkD)O`0ef!=^t!&m+Hi9Ft5jvb~R{wsuS^MbWDD{P>}VX>V)eqpNN~ zsV(fP(7>)KHSt4udwYY$HJA4XT?Wn(k;l>?aoZyz9{@3}_3wZaeD_V#TF(}dr?%Be z8;t*;wZ1BgW)p!4BC>2}c0P0L(`0jgpuVyOzky_jBB_kyDX370l{ehZn7+%2j2pv> zYvzM(qqOG33$LQ|$Qh*8Z=~b3C7T`L^aCTPivE0yTqTEoev;w*Nh0!;f3ngWz@uRg z4{$8cAIJi@{R{)X1{?tF>TbA?ftN+(H-iAa2Ige2Ug!DF-2j|2)L?brYl`bD)Bq)J zKwz=!e6H~-6W{nPcOI0-|LhTwCQqAV+`e}Qeh)n7vXyASaH7COt#z9IY!s1}K}CB4 zIRCI&-PW?}T0-M`Ja?JCB*wO}qP`HZN$-+RpI#j@EEpyae;l&!>{T?ax6DS~c`s|e zR$CpTE&%8?)v?2XCGmztR|c?@0}TP;%r{;BpUgr#e;>FEr>Y|@b--aF(vu;ov=?w2 zaPk1Jwg9+GM4Ga+6#D?L`hd{kYWfu}5WP|)BHcsf@M^6O0)7Vc7fpH;xKczm`rk=w zJqB3mquz@|NEYYPH=N+wzgo8(* zg2&Zj!K^OpoZUsA^KRH|e6_${wY;YCxlYB8TIw1<7)X=XH6*C5j(rvA(Xc4#_|Yc= zoxq8}V;Q0%wEt>= z2E18B)@!ZT1%UqwcP!gXNwn4v17~L8GoA??tF@jfBK5uvV1RaDn5TWW)_Nka#0}y4 z9okOMTn?nT2aNhyxx9S0G7ElRZ0B~9Mrm z{i9-4Z%&_7HEV-qD!i1&D~DIdlz}g5cqs*E+E8vB^LYpGY)}-*o$yIOIAB=6%)!jJ ziO4b$iF=MlZx+-8kYj*_8Gzpb1X-w2z#rYj+dxsI%L2fW-uMp$v_Da69UiRX>v^wR zfU^Uz)9r%UZ?Z-Jajo^}EGSpfU5~PWYqmRQT^Z)id%f3ZowptQtK*IT`$~cRT8GMg z@)Yn%2wsZXl-tfoQwq8j>?&|xhvw$y(0OUOaqnC}YseC|Bsnqd?#HTjE%MA$= zZ%AC3g6o0j$_y(*hO=BwKE5CBKRw`Fr--zE299;aHQRo9)umYby5pGztoOAk1)dG! 
z$sQoi3h+JRF5d>YJK(mdK*ct`*KC}5_iOJ{-1pfRI1u`vd9YOGtehM>zYBf=^!VsdBS6@wN50dlFZy12>d}?j)bvA{#s-{w zw;xS60=O8bOkkinyvysTxtiC1-)karh*zVFNJ48}gHu1Wi>Gas*7__F$+Uns2Aq3V zM0#BfZELNc29EPw4+H0k$bvyemcqb~1EMQ6BC^ug=W$x=XMm$UZT|(>Lqt~e^O&@i zYzKY_ED(_n_xXispT&*Z5ny=|xg@!A0)I_)jCee;^e5G^rz(@`#640@*}_{9!!JvO zBp|MjJEO#0MzW|@<5oI>k|e;d4Pihl;!JM>#@LCt@T*%|E~7#^tC@Li^u&U4SYZK8|s`w z&c16JSRaPW7au5-?m~k(v{!LLr?nX=qTuXyjg9jq*G`q4#=;QD>LgigFu@ZEEz?Dk}r+Sru?@@&Im1cn}(Lcz~7pYo^8XY>0Z_ zr3!C9o_5~~c=We?n+Xy5D4if;M|L@uMu>-0+jpOmuRhv26)cT*-nx( z=T8G`MSY^7ucq>hP&rS`Y15%IIyGGuJRh~Wtj($KZ*qa(WEl*wU!UTjUPV`qQ*T@= zFsIF8l!PvQVp6Q(t%k&p_M9BM;*d@?vdB1oMwwBJPRXvTU`;?bZv~ty&{~i1@%6N9 z0`BpdZ2^w)#hd+3VbJ2P96+mrGiL8*jC$9{lQWnWU?dlMk~`jM+dbfs`vb$Z)|+zF zq@yxf&KzY7@LoUHm+Z9Q_woOyUY;B~cul!^_S7yt^2lzB=c6`{72Eu!&}OwZbSq~S zM%Xa7EtE6x{l35#yDgSP><3yV$4)u6+nRZ@`*FRcpxH94s&8DALAy4%v0uL<`VHXr z08nlKt_kRCph{4CknE^no*^QC$d&zU56tC|bJ=m=Pg{V-`o!W! z*rl!}HZNkhBx=~B)iShL&dL=Un3z&r-5MfnG8biJDKtE;B?k4MG>+EC9=L~KYKX*C)r^2x5Shd^l z2w>(At^3Ux!jb((x`U!KBGNUOoWaf@*0y$#_5oJL6SqeV#}*pcx7XsR9*eo{A#QCB zaeYgO&=3GWt$vQ{vN*#7{1OAdDz^L9YjcHnWBN2o8}Q!&T~+~?`RaHwE!NMoa11vD zIC`@Nh@VG+J$#oo;3jk2ey0r@Q(tU(W3ZU)MjbKy!CO&oWG(zv82D6AWwS!6yUcRx~~VG0RHAWycIaL z|Hlu&tE)sL;d$2OY@KCYi}O|0THm;i1fB`-MfMkw#crEuhEVMkZ&`&sC1kH}`W{Zg%_|?ck)O6Ke3dT%F9ckw7Lny{n*pmjwbln@ zpg)Unl4`4PGU=BCwZ1lQ;^ZiL-FF%^_}#H&g4RY?k=!OC^FLGClOYOp$1~Nj)JR)> z|M*UeZsioUC5K?Ebvjh)lU4D?%?T}op}wJUV7jygBqzIC>Dz-C!V;Hx9WX$Tq~4qlxHna8!8!huxlUV%jWdwRmQm9Of+x`G5=1Gl4e-(8i~+CCm9IoFB4U>iP-288lQn zaW&he2ASk>oO+g-0a2#|+-EIwk2>HD5&3J5ul;Lxyvsbta-44<(vEta=UUvaJw5X_ z-*sef!L4ZukQJ_zK1D?00sK4Zd)~5a6@}Z^GdLBX_u&*drhH-I0e=r|$T)x6*&I_- zS!6?5O3_nf)NvQI>qlnvS}ZJ0abAaJkCe(7!7=U+-}Wi!R)+V(@OIed)Go~kadzro z3T<;wS*mKdQH}BXZT|!^;0vRJT&J$cjoK^{kxl+c!Z!3@>uPcVJ`|Dn`)S)>S+f$6 zA#te&S>@@CT8+T@qN~`oq;I8@Rk`-S{Zi{joIxVNpM+JmF1-P~%l`OLPq({*-A*H5I>r?8L0>h!bid|$&p<8{} ztW(g!5dDarKkO(XuK}+GP2ynU(*gb0xf*Z4{`-60Pys*J%^Y-(TOP7#DCR8N_sXW< zf!Yii_{Z18MpyNzNAB4a9@eW2%D|j9y_vFfSWZP}5cmrXEDxJER>l)ErdG#pIkijg 
zdq|Jsy^vr$#>oO!hiDh*8tORJ06^R6``_8*pW&T`jPoa<;_x0-)T<0BoXAeMa(MgV zh+(Z|_*seY+hUtOA8^A!iOtvpCdKM*Y7Sk0P>HgmWRx_x0I)7w;U=HHL$3_ z=E8Q(2-~p)eF1dE0HwBKWynx$!~Yc8oYrY=RYsT|Y_Bp1YEyps^0I9TdkCXFjSY$S zdv+V~)87@zzVAgS>anoaYgiF7)`5LiMwT81{t-2+{#0cDz1?EB2w3s3v8MD{%2E!6 z>ad_bV(i<7$;!sC`O1g&iFi$Qj13!`@~fDaLCKK1jo#XtirUH%8b%k{DiVU}2luK6 zFK7=jTDBC1TW+N1)u`czWvRE9HYBEvs;%6s(5Q-Hn^EIa>WZi#CW>_}7B9RXGJi@c z^VYh$#`Ycg+x~gUgFmMYF=29SoG2cw=~K1iMMZ2dYqUu&51Zyb_{~T4iCcm;QC{-$ zPc7c4OyQKuqT#ibyO!5hjw+f`S)8d|UhdCAMr%Krh(&(_VQw~2iK0000< KMNUMnLSTYT-W{F* literal 0 HcmV?d00001 diff --git a/web/src/app/public/goalert-alt-logo.png b/web/src/app/public/goalert-alt-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..3ae1acd8a72afda571ccddcde73141a7e41213d1 GIT binary patch literal 177876 zcmeFZcUV)|+Bb}&BjczrGaNfjg)j`#2|X}Y3=mLI=_OPNB@jvsfdplgaTL%5K|+az zA|geQUKN!IElO_*A|Xf%8VE7eZ^fB2$2jMCpXYkN@A}?9-m@>n&Cc4Z-0NO<{oT2G z%F1l}w|l=86BFAGKY7eXOzit9F|kcW-)sg?&g({B1^;acKIsrDCZ={!_;167Y|{ua zv2V-#&e(_9BPV4ktWF*GhHqz>hXQaQUf!6_36M5qchMIdXMH3{>F9XKo;P~IMKO8zJs<1G(W(^d7<)X|XFGf>mi)zE=z9+KD6(9~1c zfU0Zgs%mH&YUmqkY0Ce39RQg=7$cF7xswsK!=CH4uF~dv4p_j zzq$WFf`^2 z8XfRY-Z}M8D#{-{D&!S?`5zHTPro1`X_bWw`SUIBW5_UX*a2Y+RSoc?cSaLx2sJR& z(NNWZ8fs|#(G-F9^7Fxc-qb)!;nFq-tc3v17NLcetuqt8d_dH+Mb>os@fU`o~k-tI#5*)Eq$b_mzIGR zQr|9QeQ(=Q$t5vRbSuB zTh&Jws;Qxc^zioZ^7-d%tTBE-P#^<7?^QS}uTQhmN9t=Ec&mEq=zv)veY{omHNh`W zFAW1-Z(XPknC$_1A#WIhZorg~!qW&l0G9gcp}mkt1H%7!sIBqoQr8sz_h$o#|E|Hm zK0h1e2X;g2fV_~u{@4IpAnE|2!DyeJo$|Zj9bkXV56~v`(=>DpH1$9K%;B@oH1(jm zLP_%Ja2lTU3k5ui`%^aAdWZaZ7vLxV={y=DJ^xsQr;wYyy$<~O*zezN<^P0ee{_pI z?+wWHzhLk`nuVf$!mvn;_hDZ^cWU(a9P%S&5VTSrq>ThCJyu%dy6 zDpKDEs;Xh2t*7Cst*@o8hy3UI|2BUu4MQz$AnyNpK>xSr|0jVx&m)6;y@C2tKk&c5 zBEYP=P^5;JD$>gfu(7s|jw;eq$3s<9$3WZLM^jTn*8{Ne-*@ES%8~y+R^*8eitxsK z#%IAu3=-&JZ%iocfDZ;8D31&d4)F8*bllVmxlHvkfnGH-kv?I} zYo1pw>-}URi~Oo6c>R^g1K}T)(U&6s_RxL%e;$euEE7cmw>t`Mu9j{&Pb2E3dJ)P~6y^%^h)y=HZvN4p zM0+N$#i?4waEnNvVpNyBvZ-f~_w0dI=mxQvAzr16QY(icMeWPN*^|(gT<_^qi#+)1 
z<7&f|uIU8pf+;PL4raFTO`^z<#yvlDm%_i9+938u8y2#TNu4UEQdwy>+yZ#cOPw=_ zg7pG7antaz_O9G8S&=*_(I)KM3uGkG}{~Zw)w7sz^)GaD!lpxWYV~m5b|Y z>h#VCNf*(%_h)Y6HKt1aId1=eYy$7Erti9Cn(78%oh z`TOpTzJ)?lreELMq;jyMz0ia}FQg^j%2{cGd@~6rT}#uMOQX)hXgj{JcMikAUhQW~ zJ_?)g#naQ@C6N`!^~h~O&-~T~hXdx8T~tMe5{obtz7Z_ZKI=5q*8a!@#$X>KQLVM= zQYV~9V2H`O=eIVByyC7T)eW9ZH1=pWQ>s##QY~H+=at8Iz;nvmSQho6LFIf#;Q^6X z#Efp;0c71>3YxDDl`O-uS$d?If!{{%Dwt~Nb>Sc6i;}L<>JtyW^y1XsGHRzUtkov1 zAV}BBm-xzxL@gq-<*?mOkpW^dPMney6MIxzwNb@(Ub`+ODMt`&UXR%qT}H|g#F~|x z{g%H?WUFHQ0PO003Gw&_sKAt*7Ov{er!kwF=*bHjubv4U1f zM%&DNuE6=d%`0RCCM!?S7t}st)f-h@>Plbd*`rT&;Kx-NcW;UI?$lZ*Ig+&{_==$; z_`sL_@Mv97MS*E@(nNOp@&~LWdAU$W@V1AfK6A^mXNdZoa~SjV4vXu`{rG}az(5kh z>GE<;>Fh4U+0nB{WMY~4lb$3Ur z4fj!~0f&R+z8Z%}x}Y%3;zv%5!=#j%xe2P$)Pu{5u6PUV({Dwl6;m+Aj6-*B(myH# z7(b@0onFYSfKMdaVBR~&A-f!JlB04*WjWZvktb7@hP z+QvhPri139T;guz56)st1}Z3r-&n3Ar#>N{HPMm{QLoA-tLPz5T9cv(#4HDd8?4mW z`?x6mie17#y~|df$Otg)UPM?xJUS#uSS)>MY?p(d=$L6r?Fk zB!;G6TT-MOzGkb%mU!dCgY8M(rAq;-yr6UYMBg3yB)Q8A1Ov<{lAFxL?m&cWO+V|y zwZK7DW>IX@X%iO?^2L|*6`m6Jp;Xi2PDF*EN5L*WG=1wbt<)+4Q$ZNm89ii6U&AM{ zeoz5~{OpREH5Zb0sAU+Jk5xv@W8rGM`*vqT5}i?8mTh45&;x8&(@hgXW{;>`Zj4YA z@?xN8kPmOlriC{qJ@!T(*GtoN>-R5rSMie;UPokPWdm?8#y5kCsXJD2T|;JNJd+8M=8# zwx7+;Nl3A@w$4qe%g$T2Eota2QO_g8b4VncPU{j#H?z9os%_WGK-Rf92cNoHv%@@9P&oRpKkHgr{YzO;@&(o^KcLj}{_wFSww z>q|{iO5xHmp@Conxi&5P$R)ncp3dct+!=D9#5Sz!yEgad^ol_Lo1!wHSar+PMb#O& z_&G|D9#-8?_N+}qUK(Z~Q2hP%C~76zhT}v+TN}WhtpuR+a@KrMf_Tyr|C-9GWWy4x z@NWKBjg@Wm#Y_hq0>PP=Mu}p0kegM@b7Q9??%eub?jWk5#~hrv&n|bPAG$X>wLwee z)Df`vg0iG_<5ac)UY)z1lRlkinUdshgF)Do_&;=-$cb#oa3qG`UJ-0JgCtZsiamV(DnCaYS9MY6Yz%%0SQ=K;{(MBC%FEev z*uRYBWc6ggsWoAa-+5kDANAr3BDw$~B7)Xx$B!z{UNg*IUQyNM4Ukq6!B0*>Gu!u^ z8;P~VEUvXaFDXX71}wOZX-TI0`n3IR$x4TGal;<_zK7+iCf`hWmpCmiGJi3l@aWHC zOYjy`bWJHwMrwHt2!k7>T}rr+iuTt)r0}<+>PD{|lrnSn6Eo?O;;X0R1qrRgxR=nE zYe;T^(x7v%zs`#7q3gW~#ID1ly$kh6G0k|q_Ht0JW$m3L?%Tkn9g+|n&Z(t!!Z&g~ z$;M6p!K!J^=mP^Vu$7Zt7nJm1v;%F^k-5rtc>6>uZpHvu|dns4fCj`Jk>$p|EKEV#CTe!JK{cy+2# 
zw~?ELSb2)SPfG6M3?<6N+*PMgQ?|OBg-;CE(gp-~SRhNw&F01uyN-*H{F6mcyaDj< z03+E2ZP~(q_tMvnG?Apcu4_89oFPbMW*dH0%xll6h)jXZij$lT5|*T(LIGv%cE0O1 zhfaH)ldvFgY-tYrUZm$wnkC&cbfRE54?bU?HEgZd(eG))?EpOJMyD^TbxFjiM(*rb zkaeMO*Az;}9uS|(3ohQzFb;lkpOdOT3^+tvdNFbGfJnDuLubvlz8OxXPBLne*mJXW zH(~O$A)sc7R90%AET{gsg_v%GkXnHe>-Nfmj*$2d*(!=7Q6=qRs(T4nOqdCaf}Wo0 ztZSm(2G~72@*qDd$SbwC^#t7Y~WV9v=2FKuTsUq z$Poo}CUv$>k=C_oQ7phURNM|*=FRsHH4F$mG%-xJhT;}%@&|k;gKfv=U)5*cHM|&fi zQ*^h#$%@oFvb#&cevrwvnBg(YK9+?YC2G=~HeN@fIi7f^mr62{1V84Snx#1+p|*R) zqJo{lpj05bql#_yPFJ+9n$wH`?Tf%zwQIrr=yURoFDYr0u)9gzePa)az;&up8kF9< zvt9PJnb|V!XQ8d}`87lVGq?|C;HkyFkx8Gs6~(gwPEk^r_S%h0t7WYE{E935bn^r~ zC_3{2ZvMFjI^%)EfV9CDNq-W*gTCkrwX7dsYGR}noQMgXzPg-}M!UM0YtBs%i4PgK zAp^sB62-6nSP_x*zHaHU!nIb>1NLbleK z3yk8rbctEP5Dm%HsZu_I&+JyPrlifTzH{$izPJy~Ec5dI>iURnhtnXeCv5EyExP%1 zgOhOJDooYunQ)AS_S-6#)|$T^jz|A;xQgRJ6o6Tm#4UpN!XC1N>PP}xgwQ5#+H%fz zKKUx8A$>2qT?!f-FF8A@B}P>3J<6<4^76nOY;KunCAv;yrT8$S|#ED?)9!|3bPIiJC?^Z)yUB@^>mb+_(9@nVCAP zjGL%D5~=2-D!6<9NaV}W*o@cDjN%Y;zo?0fELNNWe1Tp%aL^e+X~dHyzOZS+D2k>B zbo2R2XzZ57K9ZSg8LrzVjX2v~8xeV&S+EZUKv=>D9YH&PL4R5gBgL~=D@fUo5SB{=t-vAthwm?zn&e~Rl&8JHRs!ay=SFEthpER?5>>5TN9)$=sJ?7s-hAaN}iy6c$fvQ2MFGorQY)}74-6AJj3r`Lb$`Q$3O zY(8GvJaaIx{YM-wJ2~xd3|)BtYYhrebGd^$#9NQ7Z!}KuEb6B(q^&k(?6M3_bS2{# zC{dLKf>oEMl^&MHgg$ljtpXZEyH=;8N z$+~NSIMF7lAF5~_Xn_wPc^ z(3IV5RVnD)bgy(E%3K(A;?y~EUuFAPq)*RHC;ZTDyZ(yvxda7CGo|l9=+cjK3QdIL zdLp|Gi8rDuZe$LHGYc-l5}g9LLp4`$tKAwC8qK4w%HtbDOY7u*H(!6zmvNg@bD+_R z`}^WagSl606Ui*4X6VakJM(&hS6ZGbMuOSdz4CNHgJ=M4J2Ee~zGiPbug;+KSxU|a zezFpEXR&)AIJb$J*htiGUj|(wemSxA%}i4H+KJx%GpIQY+|v(Mi{DNP9!W6v_Ro8o z840-KBpdoD&#J!2GQva)=&oqzE$+FMJ`sK%>{s<8d+knI+miElQjGb>d@X)ikVc~j zesuYfkO$Phnd(^(vI)NjCq%8w&vCjD%NZv9c@BH-^0L|JnSY29DB>#+Pu|J)iA*$a zIciqWRY1ea&aqZ5-pJGr0XlX-$w)AIKO>6PVlc}H@M|#z4vMQLF*v;QmRNbIzYiW8 z)QKb7$QmUq;jhrPCRE_sY-%R)udTWGuGke~+>)Sc61n6Xys%)-?OI-%4D6iKMg97! 
zxEnk4Mc{X4)1}Ba+gw0+6x5A&T?izUtJzorq5df9K82D4*bUCMD2 zK9Q1PJ7Q+fgK*0`h~Q_^R&W^a5onUKvn(!?P5j4dpH({%*54YBzv5undY?TO$X%Ko z%~>JLaT5N~T^hVPdX*d@8qp9lx_C!c|7f6}8^sINZ3seo+&kV*!LX>qMKnq2X_k}q zlhi%wrAU4xxeBobN?N{8?(FmkqAv_a?%D9GC1U@`8 zECc4AO|uA^WKUg&bTx(QoTo`ykMTxX9&P$RN?YW&Y*q23w(i1;GZZ z>Ve_?u(`SJ+IQ~iMFUVvm)tx;B^gMWv?+Y%A*h_p{$C#SF5x32L>a>9@*UZa(y$Vk z4XGBK7cx}PD-~$yXt*;&A<+>v-|1v1c$f`ouc95hZPhXj>>E9k@!D)4^K}EjlEl~<&3JZ(+#ZS&4z+sH8aBe;> zva2Siy2yei8Sj6+$Q8wX=9wYg&)N_Uj$iG2cT+S)8hxX~Y-m=h+m_t0!W*-VHWE!S z#AF=t_$EpM_~WYZ#RMBqz6NgTNPg-*R_i&+%E#AMvUUBeVU|J?dZt|P23o4Z?~Zma zO-*xHz4Sy7Oj7w|Sovn0pMP>$Hv_@EW(xGPxa30Uyn+5bw4bi^DY|(c4(*#IaQ0Oa zRr$r_Z>BEN-%h2DGvKbesEpinR4LJ3Cu}34mjv$T51JWO1@O|Hug$0Fps7@c)go)d z;gl;*-?-V>qN#87m`$ z^7O|MyuBOR>~9lDh@sz8ZNEmozVPijEK*U@p`4+6&1k#1>xEh}8ur>}{k`-y`<81; zHSMM@+CxUx{EP8gSa)v}`_0;jg|aEEP_zDg8B07d0&|)eD&8gIk8+K=Lp7!D&Fl)X zv#`;FOENVz2Q5+B7ZP1WBPfkl2Mjj#YJe0@fK8^Jd3-s<+H?TLhAhO~f|wF4l-$a~ ztb#7v>ChV5+ZSXD<{PUb+Gko6vxlNI6y1z~8%cafBN{MinblQCBbkgm-0PWAWN$S#TBT!UpHP&3%H46?Cz(q4QVjp+EKz5J2|9zl3_UeDUN$17>( z68$*5DhH$jZc`oV(mFPdVR&uaZXlKTq?MJ*mzBlTl{c@gIHlDBdbQo$URExDd*@JN zR(cRaT5@E)$Vpo|Pm*Unqai9+BKB@M5R5k$44k$wo;;K0Quc0q=W94f=IEM+Jt1Ff zrm}(w<~aIrz}$4WtUVfp9a;xF2@16PAoQ|&&YbYZUTN1hUA{q|1-A>ojN@@euh6|8 z1|?(;>u3%hyOO;wTO%sYKjq+tuT-V>)c8IRjy2+uRZ;>U7rc9Rn8ufx_6oX7W8FTw z!L@*;&C>rP|$2C)&>t3;jti!a*Xie{|k|}Z~uju>q z;72;vM%nNB5R98gt}%f!afzcE0-9;T8@8RAWvzw$)pMFFT1hAB&m^wm`i0xu$)=@a{~A+x*MC*vYNPK@s*esS2u<61OnfTiFp4>sz53fE> zxx}ulFb{7|)3JV+u+RAMVNzQ4L1}kr!^#x7<{{&YIKNf*>#sgy5nQSJy%*l*ojX{; z-%GonG8lEbh_=ogqQ^By>{8`hq$L8Qi4!_I%Bcw>_*X}=?X`E$plaY15n87=dza{h zd)QBT<#|6$vg~urG0)3zvuw4sKO#ZR*?Rvk4Vo3mCdc-H+g%?m3+8#vKMeQ~#qpJ?cFw zYuDo9e2Q%vo^bZlUG`SaG(j}OXhugOyIf(DzD~Q`7J88-%W~)qYUp? 
z#vX!VtuUv})vPcn3fh4YxpllORd{FSS`lQt*Yu>h!K&{#+5f@t(mHyzF3DPerLLh( zKVaLH)()pVnS{W7BoIC(b(a<9%(?3DXzKz2NHHFSubxM(6-czc9xOcM0-V{#9>!0; zJuQWh_IwNOuOSJ-q@A_VfY*VvbxWs1P}4H>jSA}k&W+4G$}qC>@oLokuduy7@2#tN zvZ7oiW)y!%w(=~Q+XVLY8Q53sjUZB$ZJ^NGD8-8#7}mG0`wQcZ&a7{Ni9?g(T z1n?3y+d!jRJ{RufRNYA$RxBeBYzj}(cEHu9AsN=Dh889V-F!<2$gFGD>r2-)?vRpn zl~dbTgf-9GGB^`sx zk68x=p@c_7ZmJt$+E>HZU4wkEmO3c2A6+MO6^*_KHVQXWA=MN;?%rt}{ppZ6535tE zHbx_XVJoL9%nh%Mu_~V(Xql&!EL%`9%uYKH>s=@*q-Oa0n7WcyYbcJK8>E_?#=C|tJR&rqnm zR!Ksy@?;-+WDKjG#2}NGJG*O=Y&Z^NbyeC?kiO_t7$?JxOUg`kF-XS7v?hzHGVknm zuq6i2h7?ktf*=k#g)cNU4za1}?E^esl7)+g`v~)Lswq)pN7C}Dp}H>+45Vv0@kX}l zlQ{L+z!|R*9Zl)U`KB(sjOf%O9{meV@oaOdQTLGT;Pro~h0uFQ>ygf!9l0Plf)mse zObqK4Py4sqxnZ-GbE`AjX5BJR<~pCEVCWQX49WNaZS~+4--8RJS_loTg!3f!BpRw04nA_{6*c(rG4vr!a1P_Vcz{xaIIL!r6`s-Mhfs$d> zgNE>kj=7n=tiA}zieYHo12aOMhteAUfXm?lEogZTk| z)82XG`8HbA>w1;Hgx-S)>J%Ez#qr`Nw=;>1tCRKR3PLQD)syjMTkgG(L>}<^a@C)t zK5_z~r`xO){x)v)6P z!@uiD3j03dN%<3c+cyBq!@d3D#K9^TNtYl@NCRtpc}ehbx7J>B?#H0CbvY{PYLy)GXGskOXaqPa)36TLqyJ1zBw3>^>HP?4;++{jDdy?&|6) z>*NNrGQGC-&}q@YrF*VQ**TZ~O`q_2>7I5nQ{gp%R*I9Ef$>I*WI3b3Tl=5+mYqQn z2y^m8)A~DC$GeT+3?5|Yp>Jh^E$@d9(m}=)AXF^5m(P8h+hdAoQP1tw>%N>V=~7Wk z95Q(%Oghg#!G*wGH8_verc#V2j-_qg>@?AB$n3858j+uy4Pq!3<8QCDwKD?F=yak` znb0o!Hx7sUT=11fbM>P6w4r~&j~cCd0z=Q95aQUH50xE>{FrXp_bFH`nJZp}TSYOx@&i8baaR`J4?h+@_0~nw#0(3N}0NzU?f;)7-`{NSOVs0oMaw&j{;RfmtGlyh`U>_0TH^g1A z7T8f|Vuint9bJWHm+Y8{Q@eDs{{!=szpJZ-(9Y(xr{V4a2ei{NA`Ng#DZHh!xyGZ6 zm1b2wV0y@D>Qa3O<~DW6=ww5{>I(x0G8NVd*V{Mm9hoOcP9-GMF&`-_AHw5m5`)u3 zywI3gvs04kRS7$byV?*k>WVc4!YB9Wmc?;x1Apx%(j2Upy$iUsDYl+lrUh?1*$4aD z3QgVvqySoG0$Vk83SH90Dj^x~R~#A4lC~uEA4uOPsir8Y1_uj|vz>0el3i~&x!TH$ z<8RY#9|#2jJxCf`kL0Ywd0Y*}wdCn0_FqC`8jN7StMIo~w7;$OC=0L$Ifh#Adzs-U z=>-)ZsKM7n<}4P|wTV|emQ>ee2QeTf?R6{d_B!(ImV;9{EC(f{DktLYX{@A{{kmdV z1svB394j2u%#E-*YV^qBz$>XmHCVOw6XoGIPd-zE2Epw!%@*X?Idja1f!^D)5cP&^^5jns;&}7x z)lIcgBl1ekDUR5;kwQaUw7)2BE##?Q<-6UhRn`gq=;z3&B(=e}x znG&d}-ma^-42ty{R`9&YDRIL3(AC56CHRu!p5Z0DIt`QOpav3^$rf5+-a&LD^2#w8 
z3AfVXCreL9T!)Q%#qZ?%o?9%>7RbRfqYHcc4yI6?!yhs*%GZHUJ-)nIFmZL3XITwI z%tAY3dyQ;<9&XXnwAB}H{XlTdKQT;>)hoI~LtTGi92>1{rk*Zmrwj8&Q9R7SthjJ2 z-IXk-@5q%}78jiIJ%rq0SDsym1DKQP(&DhO@YZEC_C{5-MTX%(RVciUXp?DEG999* zd=IzgVoR=r06Dp5T5}q!j-j14wrWS& z9{q~Y28r8+Kxnu%Icz1tp{?7m@{bDZJm*lB4lss*FP16-Dtl z#kkEhU|7r*D7Nc<%*-`!zquG@-(ogINvxm=e@CDaphi=q26fuH6?Zv2{C!cM4Al`f zHt+4jyX4ri)q#r{&G0g;8#%9G`+pNUr)7NfUp4i=o z$rGGR1KH*IyXEYo85BuQx)`-WLGYrxw$KefSM`DIVo||M%)~I?hB;MB9#S+-V%1Mz zE&eXzc&+PLGmkKPiz$=Ds4ODYI=sY0(FZKr_4nRhBPfah$I+`SKgMV!{xWZ}^sN{r>e=PcRUVK2g6+QLqkrUp~k z@9{Np1Bbe(vJg8eYW{31b@h6KSrM)7TpVut`QYIc$$>W$mm8>$4ra>m%otyP!I52C zrjTa0sV;Q@p9M9K7gy6W(D}#A)2ew80^GIDoPcpNt$4EZVE`Zi)K*E?f>y&*z{V`o zyk>@YPjZu;dsV6)rFcy)P*sz=F2X>-cg3H-P&78A(l@h`Wvot{Ut3M=SgkhHIhpzR z6`PswR9Y3Q(D20fMxV8$UVA#{u@F`^yC-8#(msgWd-dt8sdHZ!(tO`!k&@Z;hdYbw zKcxmYi8`n;*83mC1eQ%XVQ{eXGk`=>D>vxsT9vIUD|!lGi8GAG>98kxxS{hCFXHv22g*V+-VI7*XjP>h_v<>61$z5LYT*wq z<<%}_tZQ}_^%r;x>#oYV8}UrSbP<0mN1$U1#l6jg04t{}V0117gwfWk&^K(;SQgIR z>Vkx^z>a+(k7cCUy0LL4MQ#{6bF(<_`p$Rf>>^`{#?jzgT0|}?aNmap3-daf$j-** z@ykU?&iXfGpR(US$kqjFc&)Gg@J`C?hYUlgV8xZ@T_;4$57+hSr$@xl95F9Yv>g?j z`G=Az72*C&-P566&o$P|W9MIUDr~eGUa?>8v{s8`bZaz}_}*y8{KE9j21&mm%fh4Y z`#4DW;QH=g=YP+RuhVR3u$_O}@y%fv)RaHnoV&z1B{E5|_j0r&?n99+IHe)0oI;uG z6M3{g)p80g2ZJl3T}wQr*N^b=O8iW#^uKPAHT~(Lx2YLetLQv9P1Y)hrBR@4@ zYTw5`^(TURJtXRg(eE@w()-tWv|S74oHF5Nla034dL%7}kSvzKff_PKY6AJXZXB}I z=cIJSp!|4OAh0$++5ljw?$CkSDG*6k1lGsTzM-tFFq9DA%Bd(>+se`|>Rl-4!yK&h z^U&`!I7OC=1im67ZKeGC4aN<{zNaGkys(G}00a7I(>Eg1b~h6q`|iLgKExZ9v{%yE z4`ViZAYg-A?I4RdY{ON-iwkOK@vYzK>+8c(Kru#rv7Of0S-F><)~dKhbK1spq^x%F zK}vNxz`3M79bN2K63STC+FH&Q1mz(OD~%3yKpRUfwHeJe zCF`05Om4Tm?>&=bxUvJAV$F}=I=e^ip&myzDYl7Zc4!MvR7&*+0V2;jgQQXe95;G$ z-+hR+e$RP`)5|qS?yQobAonT~qz->?9{i9v(94=%R^&UXR(X`sjz>Q=4!BO=mlVac zNozi&2<$_u5{fwplVOHFVHnTPbh8e(B7{jhiKf|7x3vi1l`xD>qRCO&j3!o1A{JAj z$s}*4az{XdmGUA!+DE^)yEfb-%lS6e74OBp&w<>>x8#*IMX%ABhYg4~j3XZJV2Nsh z;uqgcA%~P{>oR38L1{(Xw#x~{yeJyrTt+9W45Z&Vg~)`W(1_=4^1>0TL!0T+UsfNaud>?8qTiea@PoYQ;`uMiaSxItRU`+^Z 
zYIG9|pAe2o(R6!ft-Ag`8?vnX(e>k%Jmd)n$1?WiG0G5#1wC2E-Ib{{%2;e8`-?Bs zM)ut?KK!ewW7wFZ0Cbl(p&UB`kgrL5fv8-`JE1+$skdmRRwm3XqthP>O7oCwW%!7> z75X+3FjG=^l;Ki4%-YWyl1{D1ffim0lr2ps91ruf|TUYs*z70y7&OK2p5xYbQX}3N} zPpzuEk-0BIvZW_?(?=1u77I`~3=7y7x}0pNm@y(()^UGl+Dy^2ll@B&8-Rl&iL2`w z40Z!w4W!;ElHn)-%DXlm)_+WiR*1y7+hgL8G?``E#CCJFsU7%>fx->&En}JFgAEu% zv?HXTdWdcF8KrO1BW zmSYoN!+p&1LfmhIQl0y$0Y<=%wo^-S&fD=Gj@_Lc)q|ngsp{nc7*%hr=RKf?u!KS(dAX??=%3_h{Rm7Pul3@YUkOpg1%eN&tHYf^nFb)1p>Ehq zyBr&e-@b;z+*W^gt1aMtnW-x(6YpxeT1tJ%eTzZ!^Q}312R{t;h1ZCCbIwf1nTYJa zN&CMXxpUZ_yRMVTD~k`pIi2db;v2F6Ds|o-({PKl?01~`S_W7on;iy^HM0tlT$g70 zCy(z%3;X=@eK~RWe_2q-Gc_)}kvWa!wL-7SjB2Dc0z{V3gBD!*`so)nQNHFsZE3hU;=f@}`&DAg1 z16d~8Sra?(o7F_C6B{RfR@(x~McP^d;J6oH6~Huepht+sSo3^sn`#3_z5lku(|~N7{!uZqHNUB0ci&3=sF%@`YvqaJs(MuC01#_=|4KAxJY&xc^Rb|S zytfXm)vO@2nE;}%=Z4l+GRUvGImc+FK_&8JbCzI!u#T4L#aK1dT za?8<3V3lEb%1aS0(BXgQBFKIRxl|sYVncw6swf zKn_)z(RYf)Czjt>#Ba;$wLS^h6RMr>W<#Pn zBsv1fg^x9lsUfz)T##>Q@SnfD5aut;>PX+1{{TnNO3dHvIH?$^QulNpV7*-Ruq%Z> zP^a#;B6>to749|PbvGpo!5mL*THFfE3e?@ys^%VMONtSV4BF!Hy1wNtz!9!;ayv=S zU26J%<1G|-Lf)K5J-EV*ZY$~mnknGZuM_9dA89H3P7C5&?X)!v$g_u?zM8?HF`VIF(6+}l)g?k z^-u%dg_#Dupp?oL0B-Ly!R!WHuD$%K_bX`0NeA-a;S0&RE00+Q(zylByyGM>(vG=X`%?2e~ zN`paxC)()s2}^pekt+&F#pic29bNSc1*w?Vc+N4dzUC=!@G^VUGbr~(m_Y~D^o zEV3$yw~Yl!&98@S$iP5+`oKG=7O>N4hL_ROF?afO&Xuq5BGZcreU|n*pc2$s-M66m z_vO3hLg+?J+J?EAuNsxM>yDD{KTa@ zvbx=%?)+*t3)@%922Q+lC%@BFEb1Yw%nKv+!eO}1jytX*dmK$i&&np8yThgy!n8iP zW{9S4VtAvo=XKivHcBsxb#~feeoHRJ`20Z0=9nSipSMeSVR_0Eyjfi9bB(#V>&Xw# zkbS4WsP%m$hdx=lAJhkpoV6kN!TepTx>;MU6|lc*oc-8>_`wG|fTF?hvPXpz*_+__ z)-yNx+?tirz$C$&e&^j6%5UUUEhsDDGT}<<=N(S?7Fj z5&z~Vk==CnVv^YDmBMrdjpe3Lg@R4HcB=y$0(_6gc`dOh3C?SOZsm)3{HCbq_nPTL z^Pp0K*(0NBot0xU0Z<9z)mtiNfki?1bv7Nz3e?TRY&Vq8M<>PxiP#)_*?>) z(L6Y_vvZ@EFQ9E8rxe4X(|T zs`nX%oWwO$O z*|Oz-o2mkD=o&I*bD~_L%oc18(VErbOB$cZABKzvI z>;MuraPge_$Bm7ZGx>_@#!Q;b=CM3t$cO>KiQw56%-S34NIdohCe^(bY ziBrIX)dJ_We!@PDaU@dAeMUk8lrcOaZ*1Zf40W>#VZH+2rRJ+hCZpi~4uJLr1+Lap 
zkn7E5HR3_8B3tzH4~Jpb-NCmM_<$-yZN>4IA>G7A3-kwRgetueDc_ONN?PSCTDZB*2kY8-r{LPuwhcI$G>!gJGXmc!VC^q+hLKt7jwhthFvZux; zg6A6OXPVCXEu^~vb(dzlYw$w0oS1G$T_mC&dDkVy@lvD-L!O47k-hGmD>1*Zddo!b z_8P`3k@dRgPMzOC%@+c7+wQK8^k&sYN%Lwzfg(+p?{-t}#mh(_B8f&0^H0kmaNs`) zs}5T6y^!KkW3MWjxFHm%aOu2`i+5cK4*{B%?RnovzGsCMUJ;f!4M}&hqgL}ah#uJf zkWIYz2EueGCWo=U4=xM+R2YCwKOdSW{}0_)%SO&BFl;4zbq8rb=8Bm0m%7!94Lk0EncOpM-)@}VG34aM$^a-Lqa6~ekY%b zm{RMduF1neerAVk<~yf@CT05&{TGOulbc2D9;3@%|LD~LHAdkiKFGC7^VC5h7bwSc z)_6})**EbLgd{!YjCuxQ7@WJ1uZ&v2 zx0A@$MzIqUZI1{r_qtcs3d8mFHjl>@924CfeLmm>Kal*kMv){T;~irU8hmg6KYV?4 zT+?0q{|rDuOhQ3=bO|FA93eTny961IRGN{37&IdX3=l?_w6v&*A~{MxhCz**!TG%y~;j>29NeE>-AV;g##8ckJz%6x)$#Kr^kKS#48*LeorR602M@9ScsdILy%3! z5d%D6pvJ*rdW2=(bnA?2rJ*KJQ#(OR;hXHPL8f`4?_gF*x!C~0tSS@m>xE+>!B6S` zL@+_jO7_%E@CLte@u7D4_l9KtG0^ryyqh_3d+46jNMa$G6mzF7^(z3|u-z?t3oHuD zBW~cp7Y4;Oc1+9ulG<@;p}GhGi=Lgc3j>FNY(6))4e)k@WjOa)UO7Nm7xJ zL_q&j9Uh=896PGHUK1Q4RK9^BZVoH;QAmBdR`mLv+^4ai))MnR|Kag>DmJlnw6hlrHdV;hV~D2+e&x@GZhXTJo{xg(DGWdRFVdGYq6 ztXBJ~a;E6Y`v`llpB;ge0Umz)5rPM|?bEAt%Q{~^`nmV?wv8;bx5L{7nR9`vIy4<9 zUyzF+k!`2}qU18NaksfkiU{-}B4XJyPRQQ;umYTfM3<4-8{;gQRt=N4mv?$n3J-=z z&jBc?r1@w<-vJ`p%zaPe&BN}Qg^)tG8CPEz*R#m;?9aAERFFilf}WAkDYK5;ygYe& z{R@KB)ati9!YM8!;>%8Psv!G#ChqL@X9b3Yf`dAuj-fEJwUFAXg>TjId=r82wY}`X zwH5f<9}#TWlB)gypSMx>=jOz`Den5TC9$y{SxnuV5eC`gYAP=TnU|}43J}p|cX7-N z5_rhWv1DBvoNfjUauKTG$)JXt5myZTaMD_^L~d*H&O5|%r#j>@C%t9C0ywy4iX^## zE@lnMSzrk{R~F508LUznQW7%mL3Uc^yttZ`!jL>eOwgiqo3H3 zuD1#k;eNBaF?7(9i>ag)92-HNnn2@%$>Y#pi8s0~xK98bOxc z(q;T%;z%ia@4(6z7t>XEtVho;&o+b~7%`TmH)qFvBzQ#|_6R#A)T<%%2P&G(s<2Ta z_cLdt6FM@koEv!*u_t7uV*Y1d^_S%^j&^>k|G7A>)5lPMNfOqQx+G>Id_CAfUt{Ww zc!8dIrSo^B2#dzSb+p15sApkbCg-DBTcY><{P9K$YKyD1p<1-=F(?B9q7X0*2C6pkZ%tr@2lrT3NrC6|8YS6nnNzrP^rN;c;mTXP6HBl z)-hY2nqZs z@JocvxbYXy46%q^O_TDck6Ll3!C|}>oLJela~oEtJ-&!Gr8lVkCEOn$r6+|2CE>nm zA%2W1RXqrU5>|)IV?`dm1rifpy8HseZ4+jGb3KOF#Jt`iHaJoZKyyi=uN_)bQT-n4 z58F@&iE*2D4L_H46XJNZ?YuuavggtN*@N+Wv@n50&hW{5{+9RxZQDfBrU6t{M!lPq 
zE_Y3V|Fpn(9^Yd4+x28ki7uR4OYGpWWaC1ucrRP2yZNr=t5i*ERZq-tpWeRs`5;`R z9Shk%{`7yH9uO!cGB2A67(@;uWi5=DF@U~(7)~N^^J{O#z0@BH8JZonEYx{=sae~> zzW+36PLfAt#=-j2Vtr?Vp{89$pnT)JeOo%UO8ljk?+DzVA>n@(O+Z)q*OB_Maz(H$~m_JTo zJ@NU{W&W&?_rJ6l|1T*Xr#=;sa#D zUR{(*I#kiY*;|Cbn-RG`Mrz_u$f zHig0}>*>Df3aHPRCF68XXk9RB569+62F`sLw5*OwlWTgT>_Z=xSl6kyQ#sHHOOy7$ z70M`z4L`^bZ08es`2E=j28lC?|GNL@ul3SH-@RROt5)`rZ!)PEYsCL{CWIY#CE6fk zwuDpB=S5!Ua+eX`&Npm?Hx8A|bbNs_EU~^T*O0Df+aECZ&`j!+9akBpkX>E23^qb< z);w`1zc<)Q{dj%(Kl|~|&y+`b*#eLG6e>{dH+jOsiP+@LQ!h>-b8y?HZIshGez28@;$saI7c%q_Vl>_lv<1M zuj;s(KZ_%OjVS<&I#t?TzV&OXpHOHJ1AWFGjD%?eERqV%hl_u~NhlR@n@SG=~nkVWo*%6RgB)twBMCI)N&BFsw~vvF1)s z^npYbWaAnziz4WraKiu8_>>4hVlhBf&MbUdEw~)Ge5~kOpw`68*zIDG1<3%PlxJ-Y zfJaJa<+{gyTeL8owdc)lAkX5Ck=}u|h?0H+k3Rw_ha~E0Xn^ zTfYC(A-Fu=yOvc0XG~k23cgPp^(`OvKaC}*IEZf3=)uHl#HFlgR@kr4R0xwQMB~Ae z1bv?aqlL|T4Wv1`33kkjH_1Gn>=!6t+i-_zBgSQ00=?{G2i~>}l#O=RuI4mud3-mZ zh=r2VP3kg2V*G9Ik{C3Dzqm*%ZM~(oI&S6Z$n~CRRJJC?bXIiI<)ut7#ybnO;{Q~D zzrUx%=Ven%m-$?4EXVQ=IY^Fa=GdTc78Gjlie&)R6>)p=Vco2IE1d_~e~JCHpOXV{ zE+sqO!7<@UtbpF7{&0TU=DQkS7Z{4&)i+}urpuE1+0WEnrp zyP*kD^~wb7uLAGmd@R@8Fv+8gR)b}IYaj2kDe{$u$6e&u>X8AUBs#uXZqwWZoQpOmwz(D=JcG3Zw72^o?P*EJ00@o0AN#__U9V0={#eJ_lInMvzq5tva znkU~e(wUQq~>fL^sacg4w&~WF?G^_GJ&w>v52?~P4bfFA&Mc8fTdy4Yz zB8z~_3%a6P0IY~j(zvt*4YE&FH|GIN2JsZpDmw6OI*|oqeSuLTfP3gM-Bu zPyZP7A74vPsX*y_&?0Hc>h+4jLct-`h@xX>rAx;<3=|oqQ;QzhZy%^H&)={O`(kk! 
z*o`HhnNz7vH!){r+_V(O#{;|a!P}2MlbX8tB~xxcCISnBTU{Mf^AZ-3DR znrl{77K7t}r2LZW+#(db)!KKZ3Jy#>MQ<#qz8jEaT9k5I@eJergavLeZ+_P@BM^dvW zdr2PJg@M!u3~jn77>!roRjeI1-n6+*cQsy^ii>5)ax&tFvdrDvMtcZ+X6&Yd`1rFx zSw@iqma;#KMgI|84+H4hH4#r+KHnH$QOsnDfgm@u*z+9R-g^9e+FUgc12zH~JNV$X z*?EfKqJrEt>QLhgS>D!4dEEO^wDq7$tlYDguO3M6 zZoP~;#ts9rijc+sl~Uwt0b{eP#jL_KWh&FfJQa6;eNBFAfQt~=E(LtU3SfRj0NpQ2 z=Jd5I*lfy$SW;V4bF?zeJd+_V~;y~LkOjXUgJ z?QQJ@21o2|F;m$6N7#RUCZ7hB986y@F6`*#Z_DrUE_*zp#&DwGmz&b+HweQT>oMHOK zD)=Xc`JYksrpe+e4)u4rcI+!)`yO4#7P|vKT{5uk_FO>gn3+s#+Qz(FbbjuFp`tF< z0h%4RsU>hXgpTeKZR$4^WmPw1)DlzA9i9WN){Y)$D=FS=ROLPgOti{@gWWSqw`;$L;b-}A{GF;LdKk@ZRr_kFFZJi5RfnjD+EYdx~K zxh>8RdUe{NlqP?>eO_7!Q0($MYKgqG)qn2l6PZaDgiWuiCU%Q|(?} zbr7DqroQXzqL^drYQbyUQj0|S?f$$td@i#MWwXBazDuTb+!?NY3KfE%O;LVqnGh26 z{@04U88F4kgJ1^r|8+3JaCyZ=c`t+?x6GYAk;tTWf8vz$>zt+50|V3cuko~xvKp>l z4fcTqP)sM$G;=d9PmrRfhcSM*T0*YVFkpx@E2_sEP9y?8YUD!gR*aN~#bAJTJABLO zC6EgI*!^UWm}U}Lp77?w9SbHK=aznvIBYkhTIy

`DkwD!L;cc|TfWR-U)OT;e~w z_{V1$6WMQFuW1uRja>hy~^ z{esG#BSc|m#=Cc2EfwVv0*l5pT3_`!Q%SWvRS%e2R?f%}0)=f3wYnvPf1O={RG5a3 zpJ`lh=4u4A!j#urnEL)lH~iO6=gTO=Je#@IA~^q;(O0xxBOItLvh1wCDFIE6j3bw==-Cc)bEFuP2cI2w zwNjuTlXrEh|HOsWr=V*+uAg>Ja)A7G;Zi0BEm#wnFe+o_htqP+;dFVyA$+7}#q^Z$ zhl9|3Yz%fcl)1si0Lh)L+L*c}zpc@u$z--QTD#J*sbNvGvWElyIxW`cP8Fi)!ISsa5jI*B*0yZpKR3-}Y726QTMROc@*Qo2tA~qH)7z&} zR*Dv#us$nT>cFbi!Q$RkoXfTt#{!#i*^((1U3@9$@YS?BQh#ui_DAs)(bNzJSjqMm*Nu}z(>Y;mw)Qgh z6}g=cn|<6%q9yBFkp0_=jHLW|?3R4%#Yevwu@cdgVNv%2MSNw?C>vP-QpK=CATUF2 zH$Fu0zOC4R1nkB|Cb289W#K*Kf>@xrro7RYYDe364di5?ExXje1rj;AHdS^q-nVxU z$kW_`Jk9>?1pFl-f0~~uU9Z|6wjjYs(rftTGT^?S7+3KnFT0;_u{Sl`rs^ey-3P^@ zOR3o6s$v6q7lFmKPm95A&tEHG<9)~1Chxn5jO}R4C064UcYlN@Gewj|1s~q`QrzbD zhEj8sO9Gjjs0q2EkUabrqkI0i9!GDv3XamkqGw}k{0z(oYq&`XhH?-tz|N^7^>E+t zdpG~Zod|PQojLV32rDzV2GX;&obuns{s&${Wjc$7r_Oy{53Uh zgeoz=0m~HGlL?s!WxiJ14TgdVBCu_LVh*pszt%su;RaKs!0UV#2pCo?yB?ZW7gGIg zl!8#qI!r~$`AVhhddJZ6ShK$}>)s;k@Y;TTfLv=j^{$Sl;lU;3jB>OJ_^7yTz;9XI4_)8PIumE8vO# zqhGJ`QUf#UasSrOd+He935ca7&KK2?T-Su9eM80ZxucDP;11hXZ#Kk5;P;MK|2g|G z4HfbZ+`V6NvxrAby(1}Pbk;}JMNt%Mc2(>Z<6RhIj2&#iUv$+XpBqY!pNzS=p(v84 zz#=mcsK=P4m3F>Hr=zkR?RodH6D}dK{nPWGynJj{bEHkHCcDxOO8f=_1VMJL6}@I4wH6e9K8zD> z4n`=!S~MojaZ;!Z7$x_hDB&zD)Mjf_nh5O(`d1Xq^{D-V-JFWDp1BCx);57Oq|B(s zJDnK4+0+1}@86evg;M$A&2XX_)La(0T3`I=9oWDtzC~H#QF9y@%p;vtk$QYtpCa(u zB|}{8)q$c25qFYgOKA6II?AlZR$CaOlmf2GB{u$?)j(pa{xYDW?|V(8Yrplac)~n1 zkY+eP?o|#Tc9!u$$?*|{V1Shw?iCTMy#bes^Z!#>Kw&*<`nnWMz1?Z*NHN0w4V~TK z;Bv#a4Bpyprt%b5_@0^JnzSjV(aaM8Qf1xFB2ytprGr-jGxg&JICEi}h;kKrLi3~a z^Y|`dpX#;uV{o~O>fsA*?}bFhrQVb#*UF}nlHgTN!!T(6z0j-3fzPhwDTFQA!EZpO zKJ0eQ4NFIbG0ja)^hNWUhSw8s0`wyBjMNPRaYA>)(8;V6G6sCp~NWf+Wwq=#ZZfw~S za=kLziXWB;#TPlWrGK6LZWA&+yJ=#Sd1dwHd4zNO&-#r5qrqud-K$w9FK1*8@R-_nQdRbV4gA)H+t8 z^DpfH8Bqdh<;bW0-VJJ*_T>1XORzM_w=zu?xy#FKyZkGP$0@A_&4KzP&4oveFDM9u zHf3$~&xV$X=t&vV!_?Ew#Y0}}MhP?H!}9APyBN|ilQ7y71z><$$Af^xP)7`x3n|*hu)6B}BHr zck;=dOFm-Rn$!9in7JKQ#kE=@#2mo&gB&j){ii-$O8KbAbjDFp; 
zcb*+jVWHv-#A>I4BWI9TGY24xnV!_XI4qhZ~Wl#h^t57&ExR{Gqr`j~vUYlyA ztdi%^oAkcxf!$Z*%P(7Mt8`JX#Paf8mG%gkzXd0kKLxsue32>?GG&B&_Mv?Yd|~88 zIJQH**#K9M--}c}!WfJ1zT$<3|}`fTr$}K86ElZ7IggsQRl-eQs4fgFqgZmjxRU=;9I( zC`a#oJHLu>L!z;BnT?R1gm}2$6zA|J@ zIIaO)kPqA=Ig3U^YAbqI_TXEsSSs#d{bZ7zw>oUn8n=VOOVl#=t-OYh-8xEV!l)%5 zpL+Q>2&5qVthnfvb^Ue5bsM(SSMfcY=@B}+hSuc+b@Zg#!X7>wMaMl$nK1xzS*q*t zmuuNlbB?y|o1G$thkUZq>m1#>8HFL`!>b~bAI7G)hOhM)jMUa_Y<(&WUzdF0;nPVd z&i=}{)tUs9F^L*x_igOY5~KdgM*^vxDY<5PQEQ- z0n`IMF$(uhKs!D?QX9R~y{@fTsQPi@S38Ic=ijPilt~9*^B))sBd|$upV`AN*qpTG z`wqO*7@B5T)4>>yqH|knlXv;@rcWP!8StqVasOVLY?I}hu(euNVU7f#GfOF%c|$+& ze%-{frZxXq#)W&KQg^{RIgiXTRxocFvn(7+M1aTV$iRdaM!1)6OxAYPU7-i>*OcV8cs(6E-Y5Dd5+kyV z$n7G#UrWqC8y-cZpABI>nM@C z>&;M8^EL{$mG}`WC9jPwtMa|t+eUpAz)S(WNv1A-n~*$Z?I@FDoiN6K-A(Q?Vm@J5 zxx;?JHGz~tu@7M(Df5iY;1u`iaf_tK&1RNoRDtX;aQv+UQp*-0OlSShH$bVpej~9k zl`r4sCtfrIt(2CkR^ zIbvLzcPkp2Um)FLz0ww%a+kbDAV;akr=B18dYJcjt-*+%@%Z+a6mlBRc?DPzS1J?&uH3XSN-rRF-dYF=E+7)Jv*WqmV#?sN*7d z*N!gRUO%C|^`nIFK(N^yZZ%x)M#E3>)NLG{W{Z$upo{M32h^x+Em7fGBs+7N-@~F0 z$>GD&j0Mi?Yn`@|$o=}tpPWUhX1lw2Mx5P;Gi|Pl@~-@geY1OQQW>)H`h_z3diQ}4tS->j2*M3j#5MJW7y-czkO}8V2hcvROzz{F(5pdZ zzcrI7<;C5gF_6IyTogpWV3X`@grBU#x{O$#uNQ}7o+Zj6Y&0nLpY@N={<>Z#ZEze4 zwYUnKK?oNMYOH-a0D^xY7!xI&R@~FWoybkKHY6gf01T3=zn7_ZMG5ZPh~))igfx(0 z22a4N+yo1lXvV~NudDd)2vg(k*$Z|QEJ}apki|T4kIH(!-|ddM?dC_WUy296huN|a zg5Oah$~x6$H`Od!UP5!~;d+W2Ob*j1WkaRYBVD+2jD;h5Tq;oM(XL(Xe74OE_wfUD zFJ#Q6=V$s6B8v}PYpPD;6er6>adP{KOFjPX?e2=w<6X^A#_H+QNEnk>hMm&SLj8QLTlAKHX4l4jXRu@z`0fE8aM_CNo<$!@-5gb)Zvo zx#CG=6T{6k>RkiNXZD&%FyKCm;Tp(!HN`Y(Js|J{#6t?OTvC481t>Ici;aiGqWAo( zapw{PVVd`w-4 ze4N8hUIl9)pMNz;tK<61V*-DgYjORreCXnp1v1BiOC?QX^0~p5n=_uwX|X(aU6N|; zL?fG#(OX^+ANA){ymuPT}3wDM%sv?wyz5~IfTwhl8yBuX)RJLlFF)hyiM8B>Y1*{Hypt_3LW0xk8@ zU1w@_ZLwU)3Lei6QRe0D(`j}i;jfWgtK&{5^*KY|cQxmh{0wL`48`UnxkgPFcUi&H zIk7%&!R4j~1;P7}687LgeG7fs{r^{S9L;4vedSvOLe&|F zqHfWx_Zr_)`x4mtFgZ7#9=*^E@IkCb;t)$soD-OZw<|1NYaZzN3euBYf>5CY-q%BZjM)D|i6$ldLgpVp%e|(z8s;mw 
zrFDT;zpm=PN{_of`E@pU#73uj=9wGyZ-2D}75EEMH@_HKPIN_xmU1k94#1#Y0HzX~ z5IjpOVQZ-@Rv_kBqeS4Z#+kA1$=mGq6?}0!W~|l>B+DtmA~G<`v#OK!*Kp7Y zvvCRkHvPPVA^@8aZZc~*J7QxzaG@=+eKg~j#b%Jq;tuJ_zQT4}(*`8RNfLC82{UDM zkJr2CXDky4q#lC?E`O&15>V%w*9@2Gi)ye++$_%UMok$_9IVP^{&x zl(5I@%?W=~1M~>~NkOzNLyeDtpYFF!J3F5Is-GHJZ;+kohXb%MTj>DIM-y`0tGOM& z?D!3e=PH<@=yUtNd&~0K2sbqH{NuiPpTeper4Gf$xWcY84PK$xc|B(0>WfM_6UMT2 zPg)s5>25l^#_T5N7#;|rF-?5NKe(ZX{jf>+HJl$~;bx+{Vc^r#O_;kJ@TGv3GQZ&Sgf)seluan~UI zZe&ScdZN)u-a;_D^!f8gif|-;=`oK&7K%zj!u?M$LJr(F3+?#i$WwhkJEfVSaprO%ORuYs{gI&LUla=Ri=F72DjUUmm$hc( z{6dhcz%GHI22TgP9>79NqJ4a$$Gd?<7=TX5useoY;NIm+wI3<0@t=KgAHyr%o=EEL zT6(cPp|6r_1ad2IA1fr;2S?~S^I}>7D<Nt(L{dW>a_l$r~L{zbx6Wr0J~ic|xuy+JwjAP^Ra zk?S~)gThkm;rx(HE&@VWc+D){C`{j>6l<%A9P8D|{oT;>-2>RusLx;)r8EB(w%D6) zGsa+p;SMXHD{W7)o7~eDB?Ay&+%O@xjM*L|R+t-Kzu*JcR0KRKRO$Y}^P4%;GL0p< zNx%Xq1)EbN!I2w#lRD{fH|Q-7_X;-baXJuX7L&wcIwOr>2dV~b_I zgjWn3GQ;Edw#ZiZWsg{G3>w2u$U%uD)G?HR^2hGL8}- zRMNQ=hbf05U7sRS z=Rs~$tj(c-=X7g{$QT|7X`EMfCGfJV*Ul5hmIOGF`uAIRK2c2UWbJ^y=ZH9y);_W_ zT{s4MxsEnh#XSaN3Jd10g1$G5*44dO{s7;ig)s&acM7D*GRMe0)^G+>(jl~AQ<}|~ zf0(QizNH6XguBz04=hKn)Zu%m)xGZP3;hQ4zg`F6VY&qo*}IJwBYj1+`(!K3?^(2c zuCwSYxlNb4O=`(igD9SZJ3zBe_oT^(tDqXPq~$kyWe;CTj=FIpw8SEaQ>M%+!azY9 znpxSp_iUm*$6gWC{_qDA2V^97oDmPz3C%@!`k4A$o4$wJ;^-?6PF+cD^VP;6Fl|4Vod$%QdQ1m3RHWkyOZ*K5%>7rgP*$ zaN=5Tmw-_>>4oCX&(BTP6E~fppkLDD6#M0RIvOTpfg_;C*e`&Hw}oMh8oRA%%u-v& z19kE$*%_ggPO9G%(f|9UWMNTrw?5mnYLJ81D5RX44=J5y0)rWzK#f&x1x>X9tAWty zL>Lq6oI4TWeo@Y@tZi^bvI6OLBkk|>1mJzWI7C(gC&HT_%V&7-q!Zt#hE9J)6mLHjF46hXc_M(izX_X?@CbXpN z=Q)@>638;F;ABRTbKg-eqw%UY3md_W;XqhlJa+AjA8a>|oUoEtB5V>4cYz^O!3hbh zuy1YKZOX@DmRvskUpPq`=W(F{ZlE-WmDkJw2wR`&7@G{iekg3ItLvgf$uPjoI+%&% zqz57gL|z(?h~V0l1&qlL1Fzy=(?aUV8$i)VW)W>v4rH+mW)xO21r9)%e5k4+x=nOy zJf|&Eh*d*vEmg&qMhDjXc#=j^R-ANwhloE=A71a8%RotRu_07#fS8x$&ogH0Rn6LW zvg>q?f!~~c6!Ky+t&Wp-0Vzrt6QAV#{GHI#n6pGlpm>FFj~M5gNRjoE)kcCZxePy> z)bu@US8tqqWNK$bsr&dk9)S;ES?)b@1!iLKh9ay7Z1RZ&i0-x}%r9eN6D3D%3(YuE 
zoLo4I=1FzEd~5{RS_^PDe?7&@beo@p`z#ZJVK{z(EU}-bLTJnSG=Vay+S(VmPDn8t z6y%2y`K*d*aMy_=YX#NS!lMK~%m*_WNKq7RSty%E zJw(B&fk)Tz_ksDj5cl2I>g-wX+)|t@ri>B^$1X4JKY_MWka6(6EfpqWQjR)5jQO0( zq(7g56CF9X?3lIRb993*(z^4klbiDZGlKSNZ$ifnq(8hYHm6&p%-GyzuquX?q{Oqls zlY-4%XgedYIM$rYyxcl(`Qux#{Zj;h0lie`S8uNFUv8wW-ZlN~UBkW~q*&^z;xSiu zje1wh@|igGZ1_R$=lJXv_HJ!kU?qwJjuHvJ477GCNf=gJM;cE9=?O%GF&u;gEZq9CXdgCLY3xGRHZT1J}yoGX{Wy@$}-x!IqN+JtqR|lzu!w$I!bU+pJm>#7A z7$ZzL36UKN9Ei;#{{y{!Amq?^36oL9N&f=_OX1->PWNp+5+Pp&@gjceC*eDROG|E} z4phY@tJn#PYxoB~X{n^vj);uitfW16v2)EI6tJ$eLB-qo=&#hyCMLl`!2a%9m^#1F zK;i?wO!!{IH8K_eG`2#-&KGErw1I>~o_O6G9X5g#W+(->t3-t6?^NS={Fxr^79^|?xZCI5BU(jB-k>MZB! zqU7|Dj zUUGI_>WopkB(+YFhz{Y0cex;wJv8TlIF_Me(xL)osUB{KNL+iD5)un=$p|@!Y{p`-IqH+?5+I|n2=FQbK9fqaChoHzMN10CH>?a;#B!AjH6~4P80G(p z6{c}Z|D48GT>lCuEKQ@C^ls~!Z_<>lj_vsZ=;(?~>+o`AT0%lUwJ^#1iIMBe8(X;2IT ziszT<_imI4&*Z63$ujDseiFI5Q~KTelnmU)X}8@zt_9*lzX5K?Ia=_t83(5y70ay< zfNns8FX^(i^`4#@+lPW#ONU#Ozyg=0Mk{40_@tRsokVqOt(Bh5Ec-8~h14%U;X zrxGCb@E$2#1-J=qsFBep(x=cNY9d6Vb5t3oYf9&B+u$u~0+!zAE#% zZdn1_n3oGs$p)|v!ZFx7Yy~AJP&kydum=*5>Eel*;9aeAcW6}FgZ3$CgjS_dfP9&>I${A7#y}4xmW8MjWOIm6oki2 z@4DnC%%xfh+2&Qo0UTppA+h?ddQ-|8jg`6&VZ$OP=r%y3b`{~nNF^Oz_@mHsDPFtP zSOWGmh-=}MDsi54S^CX!kX~eOxi(N74JMzGl4>Bi@J>dv0M7pDZ=dE#4kI(bdOTjr z`hPbW2>{gF;#1?^<5WJ&KRh^t7CW z8<5%p%^P8fiZEbkpr8xa+7BMZ z74-8CH&EWEiIlcK8GdUh=E(pJCH|bH60X#j2 z!H8k+WhY@RM0gc2Nuo3(NnNUG(>H>nC|*=dlA=4AQZLbcpbOygi8`NQ3;IxiY2qgD z{kL;I_|;L5UpN5f|40svU-3D?6S2B31Z{y|A#@5A%PAytSO*EeQF~PJi2*^F1qqU{ zD>#KK&~I%ys%KsnH{-2%5uvE!8;k+Oh4a5bK?TQW9w*HL_&F6Mo9*>^L>DY^_mVhFFL6Tw z;#;@)du3J%!pnAbDiXSQ@tEHGhAU6J10u%17C(_3n7EJX=g|9NtAujllw87A|yehC~U{1~07c{~G*6(ZtR51DrMN7d;WO@SvFI?Az%H zv-`&@WVEYECESIZ8y9$QhL!p!0#`+2%b31?u~ox*3^=ds|7gwTq$C^TFeV2$4*`N| z=it}m1gSoQazowWIp6WNI!82rJrlrkbIqE;{sS@s|HbZ&{zycIPrk>wrICxUP}?n^ zCaf;o$Oz+{X9=9KuGGtYn%!*)TLE_C=QmA}WhULb*KEM?i76}F;>Er9UzvjgFTXF6 zXyNK;o;6`v+tRMC>#}pB!+lB-JkTpg`^f}6EfIeDmJP(0WWCF^-ymla@>A0k5CsZ|}u_wURs{cuC@Udus5pWDPlGTOZGqDHh 
z@dKXqppQOUJODeLn%fM7HxOi)1*6J>;K{SoK>pf>rgWB+n zxe*4s8l8SHOp%9|hI${h3mzw86(a|^n<{gk>*4GdM(D#C_SRs2TY;EiIud7EV_B5on4m$yCoRr!>_3a@5 z2$Vkr^k(q*9QPdeX2QGmre@Mxfjq^#tF6PzL}$)uD#+^=wp{L19g8enUAV)oW5v!% zZNot8ks;yB3>!U+EywJCaG3Wa$MR&cWg{CVYP%_@^04CyPOe*oimfagUy`#AMFn zAQ)gv*%{xT1&#)R-A&SrnL;vDlPWUhQwq?@BZqU-0Py3HcD_t~--^)`bB8GLBwm5h zF#+abPA49f#9N19L4NV;9{uBkAF2?GwxZ78n;7YDv9}7;?$}|)@Ge8&%Y1S3o>S`0 z5faC8e^pj&uf3G}cKi%T0S%B(fT-Exu>{D@j0e^{+S?8$wBQ3nvX2Y}XZpl-SYXmg zfj8Bte(>MCff%|dBnaF?F;$Qrhwh`oEZuTT9=VzL)Ra@20KiClB<7@3l7c;sqc8g4 z&XWjBN!O-o&*dtvG+5Qh<@>fByKJEvP`^(_Yg23bGyCy+pA2ccZEygA&wBX$xzO`P zK-Qm4o++@xpxmI|6@Qp~YBCosh|e9CLSlhUrB8_A_qu+&v)59Zt>bhl4!|qqUIm8A z|6}*S-{Xrw7w59tt$Mcr@~31{mmz_^C8tR)Rc^v<@*bc4@n>6O5BUs+E2ms~6$48G zwlQ>)BaD(ZMz%$dgr!Yhr!u>ITF}w^(zmm;yP8vX4==fN*bO;p(@7?NTL+i>r(3(~ zOs$Yx=!A1J2M~~6S${i@DH@> zf}-Hbus2sHZ^r>?<-=q4rZiF*n#v3=R$gA`Wx z&$u-tVjrsjK}>SbBx+5%R_S*LXcC8Z<3aGia$j9Vz|D`a7xG*|FV%Ce99OW{8d?Kd zFsALmrd;N&C~D#!H1nKHaGPMk54>1_p6s%MQrp|mgG(0MiJ1~#1bGG9mLAqXeaUOL z868^g($8~3(-#z~F)C*z8+5;f=i^|FGlM^D?zzkt0 zKpLfrxD@yn3k32z>!Sx)40;bNxL0-m#>_9D226!|+^}iXM9}hsNpln)8Q-{LnE-V3 zk-I)UAM9cDxr#cxm}K|tv^$9Bn6`J~KUid_tu)P!Wfn?`%A2~_tV0|NwZ48fNLDsYRSkjI^R!s4 zG553xfeU*K#AVM0^UIuA`fXW=h)|frlVkMGeh}t=EEHcO-35?(c+=e(`-U0#%TXiN z%`fuX-4uj~6NzV+W0Pqs=i@pwi&88DY3B-cjp04u0^ahx;u3umTV6E(nK}CH&9ZA9 zhC9--l0SPpB)<1}W>WK38u$Cs(*;Ni^B+^Sk@K%k+9u>BD*nm-?JwOYN3b zJ^F!0#DbgTs;KE=q=SyZjDzkyklNtKj|f#f;fL1ct&4X=TI#V=zvpheFXZNn!V)#b z0e==X$U$I$$c_bu@*jEbmxp4tFqALvX`>f6@{0eyY{n4?^1||1M#V;$-d8|AtrpE; zCLK%-(EvUoZ;@bP67sGYF(Pax5;SMfwyt`8*8#;qlER)+>eJoQyKrI z7%kFa6B%ipWkp}=-iEN)&+|$)cN;B5;t8j}0jEEj^tJm?Ejlz5LgjgK_4;|jj~KQs zpqW^L46i%-xd0@1TIC0T*E72TPfa9Ug0XSXf{L#rU}@Pol0ev3M$~6tVfbDJm9rOrl#whqv`^NftiEz5WvppK#wX}ky0Qi` zwaESTrsR+1oN;74zWlxX&kRpP(}j~|*R*Bb@=H4Qv`14&t$lMf7W<|%;~yQ)ezA+o z?W5$9e4|2Jy{FF-u?YtH={G&2;((0ZBo!T-;++I|gD{P4H^^QS`pS`+4+9gfb1I~y z|AY6!#|wBSx&Z>sYnzARbzQk!uc=30tTI^a#c$NTbzCUD-y+;2UEpb6-zR6b{AtHh 
zd*}V3A$(o=tSZH2uXl158@L>PXV>C&Q~5-beZ1-D!85KK#av)+#D~tWHjh-Je7RvD zzN-Adm_qWr!qF9igbUs6dtI4f!|KP4zp6t%T>PDyknKcI zBw!*`(9@|N$-oO<8*@zVIRm!LD5Lky4~uVkUUg&j9R~Y_%rT#h1#s!Fw}weAmE%7} zgADZJwvA$vtJT=@_~rLgjg;8#LCd)d-v4d`JPZ|KSR4Kz)y~LijBy)c_4QM zj0vP?yp>3Dg1B%TMZ@_YuW~BTBP_QzZj$G6^yo1F=Za6`$*Z$~k?&&vpjUv9UprOd zHNNSL_gaVw&=E-+eKCN^1?;$Ltfkv4MS192~Es5Iu{|L-HKf{lKUi%uUg}#NcU0vHU+|Y3oQ-P-B0yZXN zD@|~uZlfXP@D65JAm`xuSo7NP0#n4o=M=ry^UR2zfetQ}`hCw%vVOm+O7mel*z3_`<7$Ho57Eo+@4OoWC-(HW8 zimckzVRYN`Bw@;??#7ETH|~mRw;ga6@Wxe@E=)h4duy>*sA(+iB|}wxvZkz}R8r)i z+DZx{uq6$>-)bUD6l!{sraC9lFI$l)x@7UZbEW!3ANibZWKi4!a4{BgO9_#i1wi!l zyi5|kqtR{(NBM^{23;XSI01ej02t@rTLt{8clJ~4+1$&u(G&jBimADY4LwD2SpJ1< z{xq2B+=j01)vA*5Lse~A_r6Z8^)5wgoif{>O6Hc!#goKGZc^g{yrB%lGQ%_kZ1 z?m8I&zV}9R8u?ruV9?qq?-r~z5Dz$;v|wd>T{VyPO7B}-;v3$oXBK@Dwc7uHfD2SL zDZ_5x$bHrgTYLD$UB2M+2G{_B1j1XVh|!Sbb}01(%mQ} zB|3DMbPOR3Fu>e1_ccw#^oUG)EB#1Fc2Lh)MNhemUA6?;g% z(W-=8Yc!C>mLsmBy`-q&zVliY!@>;jc+;aDyQ-t88uy@xAJ4}EX#wQe_%|N!&mz*i zr?M0nef0;V(eGyb_EirK4!FXZtKQROcgN(~PkW6H>l(Qfsftb-+v-kmSbyqXc_F8U zeEv#Sdf*jj2* zKN87?o7)$*Re(7CLr^dkZF^{dlyW=4MPTd(Kj>B;A~4tg_|H~0bNzD>#_-EeUwz&S z6Qf0qM-9)s&(Sp8Iz?_AXG>=R4nO~ZOz!zXce3Yl0yxI!_QkV<_?A_e^77g_dRymm zFQ2_ZqfeRXi}_2_zFk}5$b=SkfB%MSlB?$Z$pP;=-!mAMuOt|`CF>slqHj8G+fTe< zQ@&&l&m9ar8fBHAbXX%T$BLxxVOyO8mrIvBau0cyGGa2HU19+2Tid?hDZ47om~GSIp4PTTgmDl?u(=tJiIxteBIv z{bC9<(U1!OhcbcVrbccPF=WO_pK(olBc;G&7%L>x($cV^|1k9rRA0$JDN_=^hDivjIZ2H+&NNcIJALK@40DazWVg z8aeh2+?5#%Qu$KEduJy54?-@rlWvkX!;;hY3TLf9eL6kD44=O)DtqYEcNT_NoDf^( z&N>@w8!GKzW}}04!`Q?Rpv{?4jF{U}XTN14XbVef^j1!Hh{X4HpZ>qB#id0;){I9rp5Aw0Lar5e17tOvyI66rBzS zR|hm=vudnf@2%e~ykaI+XT2zM{EI^T_Z-1N16{G2=TvUGpOOOo^HA4@gAz91nIfX? 
z{1egIYoZ)SvN4Q1Pj%)!c|-LMF@ z9fW5p{{M-Wsf|--s^^eupqI zh#3$N5U@?Zz5o7Ti7%~y&h6A{!Af+iv1ncN7eQICMWeT2mHN=Zd<;mk+=KCh1ny@^ z$b;O$d`Mpxu}`<#;EC7Tkixn)U@MaTN75{-Fg#&+70X>gwV#KWlY5 z9g3<(5O3&QJ~KQY)7VhSFpXakKHe-TM1JXt&1ea_Qbtel1fv4!Y?^;FyIzp@it2LCBL>4KFgsjzNMLNK-1ByhU@w%!=@0M;mB7lDYNH3Q~L zxGnD9y}L2~#lIyKmtboX2u6Sof#WDYjFIhs=#0l2Nk@YB`3Cir2w*v1{^@-({yr{= zJtN?aaeZZ)^Hp()gSHB2W}5fJLnid7K2oR(O5;LbMumb~r+rVD{;Sv>9T>&ZWrkz&}^;c6_!&SJ80oS$tTOKgz^={Wl*p z-&&tsQJ)=$kGe|~(;U3Ky1I#WXY|Kh!{&PYN$-LZ$N0nxt>L6uAJt-ePOn^dvMqBLd~7Ks^> zInN3qn=+F@uc-~q&YBJBm$aPMOJl~hJQp}*ieF!)j^7kh139KO>4D078`_#_V^Sj* z*lJ=(NQn5-FSWvk2LDH>t@H*Yc2nY46I~P-GdUA%7Ge`)DmC~=iE~xouiu=b=*0#& zl9Z$^j#9T3xme&YVatQQpJ4p5#n)f?DhO&uo7uB5Wyk+pnO!rwp0!Uj6TdeBoX0q_$ zsp#Q|!KhbOG@}`$(0ziGWl$MOG&+n*M!sLwaomZT{Rxe;r2bSDv!+nY4V;K?leT!) zh#?LLJiHrggSLmhIQUin^8Q0R2UXSx=<99$#Z_5BerMhx=AzfPEXcXe6;XdM`V*uN zce}^1kt*#(#_Y$BAL@`!=zhPb*HoFpMzI8vv-^mFNy0EGY*FEizPqUDQ}Xj^>7V?_ z29&8u;CK#YMbz~4^vSpyG!?7O&4CMAgo1ldG&t8Sy>8+-wY~wi*Ek!N0wxi@hb+ct0@CEKZOWSW_ ziak9>p->y`Y5}q})6bpup50FvKH>}X7#fH^T#QojY|P5Kowj>0$n7dqQolzGP^UL6 zauoQQQY1qgvzDdT7l$ob4X3d#MRGhP5_reQwOQjA97()Qp=?QaVAzht4%wjqYizy(X8Ujm#9yb_?muec5D@*Ou;Th z{2~<6>YZsQ{}KCzP)^=NFxfVW!JY+#xS|=(e=guLVL(!2ajK68Nq`*db}Xpj<; zuEEuyN+r!FO63G84pUrA3{}tIUYKc0VPSTWnU~+6oSmHR-+2yotDX0|ySl03+!){V z+#-uBg!E1Li!fUG7fNDyvHE0uNPIlu);@NndWfb3P^6N5BYZa5fJFoAA~zju<6)j8 z5c60t$@j-;z;8ujFngr?E<)t4?f#nFRJW01UA|MEZxk6&&C5$ye;*H|Gb7JJH?{Oe znFE%8#l#M*P%wD+-S+2}3XIbbQ|qL!cl)@Q<~_qVG&-tRhE8FkMd;|6M34DIPP!K7 z@piq`vMi)dO?Z#z%JoRC03d6)!1WNru0%MAZ4%<$d_&;L$E7B^E z+_bUfHQB^A$_iyIgA!e^0A#m$DdS+&^b%4vjMYc5@V zl3yYQ8{saVb2$XiXY7NQ`Rd8)lb@g%lV@lfALH2sCsicCQ-4gPcp`u!FgGo_$nhU% zJvB7c#O8s2t{aiqz5d3)KXrGszhWphSy;rr&jQURzI-r>(XE+%ZX~}sD^G2jslXJ# z`jfwlF(CA#@pJ9|n31#RYL$I!3$w=XtgI{`->9j-4w=lUy5%A``h!t0yx}=~qk>Bs z{~z#0tx$f!7|$>`1CZ$6h41$&&Tt7Tz zFVcep7)fsC912&6{Dm=P0AqH}ugbd*g6Rr|a6hHut#1U^831QLs-xmH*4IEEZ3+&o z?)CDjiqu$LV-SAED*Q>F)W{VE&W1iZLsnXM!wSYRMy_>YWo7HO#oWrJH?Q{AC~XdD 
zXp1IDyutzVDQMJbARw&xcsDInVa0+K>>sg8QMbkJTiH7}%U@Y(2cU?p#Gh}tHq5(D zEay}weoWWwQ8h6E{PV+@k6ZKcC*CB{8fMf12-f*0Cw{<`EP5m3Ok9RF_|=l1PdpY- zc5VbFRZwt<=@*?H5*(0B`;mMz-<6%o$K=`u8RdcIL228heK@whR2rMK#j^Gm2$uKH zhN5@|j?Be^44&121;Cu>-!zm56cQTws6BVwYfK6sD9GklZPxgITs%9`TXC?r&tpba z6*@)pHXn(Gy&w16naQ?H7uqh>v9xOXy%6EIS*B3P8v>q7Dgm@4lKf>si*SQ-v?AvH z$9wTO6(x>$!gwiK$cVs%LR;VFBit6LvjYWX_V+tW{k3EKwfy+4{^FHh_h5sEZPS5{ z=|DqH)7~}horaCI%A>{sI`T;5)a>cwv$M1ACGS>7tSDQxUY1s8yHPDAJtT~Nt6A-x z1x8Q?stkcd(_g~vsgto-DT03X_pvLVG{Ff$1l7(&ri@eJj3+a;kB7y93W5iN|835e zAfuU+veBY7>O0iB3OKnYsD&?G=OL-v3+EQPcTfE%dTO{zQPdbS+1}Z!no+R19^>Sn zu9BSjyMW?49dcP_Ry$5i@^F=`Iq&ND(2zwbm_6dZg)M%^6ILpdre9pY24u-UJlH}~{a+e37+|gy_U@iT>Uuf{@ zMKM1Ff|z>>2y(2+Lg=Ci@iM=Hu}P;t+an9MXQ!o8c_QuKEa09o2!}pQt|{}bV&f7v zkFYXqm77>vNx5SJ{0@fo3M^YP0+wmcw%nrFL`?U;PY(Z<%%62`YwxVz*p?_1kFZM* z6i=d7Cp}QA+P^)zbg!xQYYjh`%5}Si<&WmtCos5N7(i{dL(vK~+yqAuJZanq;t#vH z!}K29Q}N%6gc^jC8KWP~ZPT7bl5A2rzdkBPf|9|QPYdP@S>T8%0f|~z;@y0&|)R38QObH6!3dU8hCaT0j)SDdj0gs|e_=`0i3 zip1a+&+1&w%0f6NLvL!6{78Rx9X0A{N?V_nh|H|>UJHfIiDkq2`jWZGU-d@VoZ)r|MPnsp=MpZ&x{w37Ca*V|AZ+Rc%%w@GhHpG zq2>U3tPrp@d*9j&r-pTO&@J{qWu;Dc5RZAc6nsI#&=;Zy=J zw;((1b)L$|cQTCMf{zupQ+UrY(I5rB!6pof+L=m4d}};73jsGTP0AwtSn+vA>6!n# zEuiFubsUpR)T%Ux3)kV@mjrlnP9p7CMMgk9OCyeE>iLEl1zn-Vex##Ex^-@G^q9Wd z=^0WdqswT2R>}FZwm;ofjieh|Lo5!329ZsT%h;&rn3^OHTUIg*0Zd3|JI40#;U5Wt zKw{yeXOukdiTfKTL~GB&CmTFuPq)kcDVHu^E*6DQHtm|9Ct;CZNGWeU8$$0{FFi}E zVK?y#_7%wUelD;RLt2=;6GC=43Zrw&+$Y{UU|#Euo4MOSiwchp*EVmXy>u zUe!h`jz)1l4l`>RN#9(`ALV`xEc zUaql#hv6`F+47|t3LLqw0_T<_c!seC(pdE$GP3v&Ugv6b97zJim% z+y+|@s=Ma69>H294Z2%C+mUy2rYeOg=r)j0Xk$Ue(WF%rTf)Zgd|^>N1cffdIu+C} z)(cC99sHF33ye#2P%SYVSN*k--=Y8%Q2Uaa^b4PBAZzW0vAg z^1m-GArUvQ;=~*yoAu#GA*bo%xWVKO6TMFi<1;BZ;*q(0s*d;s)|_Xt1q)QcB@McW zGMLXkRjK7Qp$e+MOfY=m+B3Y~rI%o`soLwIV}x&s0vBj5t2lpNNe`$7_#^)1W7cM} zmZNL&Sfcfe!rn-V+sOBmWtc&Ral5$Q6M3owPU3u>Kem^Z;oW~N`Zqs7fp-_)ri|nN z(Yt(NQjE}B)*wotl>2|FULItAbJHqx+ixhY#_Giy+7AB;_cQfd>nQkc`Q_y$!*Xkg 
zK;4rhZLR(G9L;dr^_Xto`Ii&EyA*8udmI->K6)5d&BgLEKclYmg5YeQ)VBH!d>?WF z?_T)nRG%Tv@LGw-bP?K?c)C4*yMDM5IeyTRhik~ZtN2f?AuY$Xrwn25Dl#jyK(q!P zuFTh*Mp1)=0X!1b;;a2HLTyHfHi!dtDv3 zrUFEp$w4iOq+cFlVhJiq`jQ~0t)tW%T0D&VO^OTp1v53QE)bGdVqbYg1?-${jSa%b zi{590l=vZ)r{|`{QX~67ad;i3Yjjvu4|jl##8rNRhss50tlt-G0HtjS4XlCtBxK~% zu`)cq-dak}vet{U2b|&mk$K4j0l3riM(>OIvKs6~dHMX&Wy>u(bgzZn*!1tmI=U4m zeN5*}FxF6mLd{gI5eNiqO#i8yi?ybzF82+ufbr9BhsCRi?<0wTJL0D7{gqDWA$XYE z_Ta#`Mc6s}8UzB)0qXRhHHwt@)nr!gY<6Cq>UE3TfdPAH+JrV@877`ce3R|Fa7APz zN)D$~bFpjjLy9CoWGmZTf26P36`aV}Uo+sOaJ=(gaw~rXm(2z!FCO9iRT~n;de^kE zmR(?N6J(-1P7cLMbqku^Cd>P;WmT;OZ2J=1JNHZ?Uhf$;ecOn5T~7sxP`nQQU!Jt> z$n7;*Kys;-{ge;^uLI9E z17dI%!PE8h5gy5;HAF(WJ_+{@PSPDyY&v0Ev&#WMipToz_{FYAc9ChCUiC5!_`@ut!)$hkQsm4 z8E%3V!trM$B<4rUp&%Xn@6|{K{{Jbp))4=ci;*Q4Hwb?>hDQP0fto?VW>2S0ok28A zwx&ubWIdL|HLQiByKA3zQy$z9r!K!0XJGh*qG^yj-y&YUt~3j-@JFA#wuj`nj4${l zynz)wHiGKz-Q{kI-k$v#Ft!1XUKH*B{6yJJcS;Lk3L9Z&(=JPeXYiy9U}f5=2DJLd z)-6G&!$f(U?z+^GT;H3b}uvv&6}L%LuW)hopz zZ^{4d&CguL#*nrmsj0gXl526NtB<QXr#!#$o6aKxc}Ji`fN|1Z+Tn z>1jD6yo$J-Xl#6Y%9Qmo5Ko&-P#IFMuSwvL*?Fpk@CGBX4&|tGa3J4CBWn7Q7UK-s zt_d}ZnsH-u5Z0IoS@*0?TEa2^Ab48**^8^0KC!0%!=*VXEWo#1Qe^^qD`RPau`GIr zOOa~y_UF{_m6au@q<|n+HEIfyvAvSU@2qtAF<0e_f*0RABUPiBfjH{IPvR;m`7<9k z-ddZ4ji6@KP%|0e_RCp+rchgJU!>Vd4Iq#)C4B6XdWr#ITRqKV17y0a-ZcWiql9AjYxyB}-yOGGR zlcB(yF!-xm3-FSt()-d~dbz`HL+EQ~1=EcgI8odCjjpBy%kFS){2s8YH zJM3S1wm+$3KB+Xm@pKUjE9&!oJZvCS;zB%#1IPK%LnxY1U`Tp)sfnyzQ|eSx)DKA( zSD6g%dufsv$a`=AYgWaYS8Lw$M7X#m-gU2c-~qOd{}BuikB1ub;UdT>hK5#Hy-2*# z)nnwAVI2g(6@`R^Mjo-(8S~b?KodDSHs27B;aX3+XJNT8P*mmh?o)A7^@tht&4{5> z5p19~eSM&?zjvz0=XvwKz59z8`$;z~=?pcV2vw13p|Y}*j3bVSpO=$Y z)6$jSTDWYytd9Rp_s2G3lKMY*ibFEP8+q2cVBb+Qe9Z4BmYSGgo@MZeerHjL8LX0} zJ5%;W#xd6O-bvevs3JyK@nhTRAvOQGAppFOHxmXB6LUKpanNR}{&MIB{q6WfGsNAF zvK#X9QNqtR$CAInb8>|1UikZgRgpCPcC@rADm$U|`~6cv*RMf^J=^NN)RV67N!cSK z{K(^`#jdE)X7qMR0BYI{yR^Ww!hO$u<;2Rq33x_spr%$ zJbGun4zyn0wG*d(o%?{@o2%HpiMaDR#n0NGx+ECO5qf@Kf}~t0xHFXvyB06h<}VWk 
zOaIyL53NB3l0l3VPq@R(Uj=v@tT6x~gEU(ER1o}NQ&gJZa(`7gknO5Hc;_)!t==>^ zC~tl%QQ32AjQe(fUVtHqujK^J))9Ei|=25bQ8ph+o*Fdl#CWZE0|>`}j4Www!eg z{m!I+xpGT37PCDp#)|ItgZLrP-E#OCYu%m6fTI860?3@ZGjgFr?l?J_Tdt(JxHeZb zx@^S2u6})EV-|%LnRXTo*m~#IQ)fN;N?W|B0(z7kmtNsiR24sB>ThT{kc~b>>%Y1) zcgguJ--6~!B`q#}q+GIxH2gzwr|&w%?1K<6jcAlK`}NP4kpbW~6J`e!$Sh1q@eXHp zH%$5=7BwHZ2v+dJuHCwl$@BL-*r^1T-4Joq^tji!{L+mpr#aR0VCt0gTAv7$Y4K#w zt{P4MIldOdnEf1N83zSTS6}6t zZA!}uYDiRQRl+V%EnmBU6|JdfR#MT7B1jr1cB?>1quCCREI3QRfjhS31$c`u_~q4I z331FL?n_^%(up*%B5*bpmxBxP%wPt6oB4~o4-(yo0$|h(m-%qHXEnlh*NW)GS^KHA3}VN#J_CAp=dI7%@0%A{zY^#Pq7s8MJ-P zv~*si=(zTpr(fMHYO`Ws6;v7F8&Ll=)4=1Uk4)ZMRk>{O9Y2(@H;iLeXThA~a@aep zS*-zx^&f4)jCDQvakRUrTrUHf@WpM{WMmUe1&{$PqT$S2H5mLbsprThGOleVbFfb3 z2){SJ^H(O`UBr5L_^uRW3ygq1jeTD^;(+S;e z4t07zSdHUn?<57$LD=~%;?f5Q1y&bj2$>Q%mNE^TykhczmNR4^$>7iVT2)sl9 zDtcpfFP&nh3l51HaV65`+<6l+^9}hOL*OR<>j(J&=5P|JUT2Iv(K9LLYZ^T*vUG7N z@BY;2GwGst(jeNrYd$eC(P@(eI7YzXjDN=PxrsN5pE6*!16r!CRyB4j(sA!DSqj3= zOAG>Ib+{n^h+;=7MxFA=%seF)p{A*nK0lPG#C2DNi-C+DPtu&3R&J2y&I5Mv;rw%Y zyNifg2_9y4nh;j-2JY1=D+Nox9YXq8yy&%Gr}cftCyY~O1#?; z03N+vczPx=dzK#FJp0njWbRFFxQZIhYmH6s!mscTUjFYgU>B*@qklWIG$&>k zr6$eXUvD~(2kRIGz3au6X^+C9qlwWL$Rq6+N%c|EZ`!U&yokh{Q<`XASv}$4r{>IG^_STSAX|5cyPqFc|ZwnqOIb17caj$Z) zCwU>joc!(~SVjR|7hSNB#o_zVySTr>Ti}IVMl4O5nr&-n;a24@+xzM9CeUXc6Mi$x zDf=&}o$DrAA}%+dRJkEWMnr6Ty})!y$EOt6>c`Lq)C^kNCVkj`O;{90rL+BIs*7H5 z-vKk@-WHbcTYi4RU2^yEXH)=qcxlBEOh(JkqnIxJPQ^JVar=@%^Xn^0;M*7Zm`$(H zyIfDkYrzA1aMsPK?V_m!Y>TkjhPI$|1z&*{>j&Cs=8qIUArM6hc7pf(ggCFjN^byc zNlPh*Yux^mo5v`5oO@vJ)K#Oac^F{%KCVxiTxw- zNh!G^Sfy7MoxK2|gDe9bc@jVN4ctpuqZ zxVTDdo*RoOc_&-Noa4z}O5`5pkSF^*JT%~s-S~FV1PjfZ;x!^}{zr5Oj5g)9AFEW3 zP3+{W<*BzF?`qhfCr%v`6EF7fj`^;br0=6je3rP!?Xx4hT*hZS#*Fli(II4kS{*Zm zsQMJerrvdEK;5Y$VMeN6*`etPBF$$Dzl6YirqP$8Tt)3r^0*QT{hiHZ*G4M(dDXw~ zY}a|uW1RE}WwI$_Ml3j>qzShWNP3Kq6m+&g@_GksE6(k4& zRg3k`CF9<+E2q-{S5vV@V2P|cf)&zQ26Ss#ei3Uav`=9|A92{bxvk9jBiRP_pPHn1 zL{JOXjsOZ6Am>UN1i^ur4!l4@^Shv^(SD}&(>Q`?nXgecdYuV 
z(weIYT32kj)dUdz$9Ubj611k5myd4LW)kedxcy=zh+Q;-_1q_>5tyUgHwJdg11CE2 zlauakq{mREbIztF<+}av(03c`~fhz=#p|Lqq$6q7q+qewht6(Nby=CLRunMc~P)Ibc#9>|+Jy~ZrlvIH3zYUqnm|tZWP46i-`PCh^f&FYwWXh`Om%AaV}LB3-3h_ERzGe^ z(2Ysuc-Cm-rNd+QViAzm-3Q~3govlf%iiPMR(K_{Rmkc?L5~W{|KgdWF%CgFyFzQO zP^~B3j|lYV_jM=*5cKqyhDwI?nS;zmqI8<9#i=*VKq^_#c6L-&Q-Eg2jMcmwpKGm` z?RJ!%PGdhGbg9tTzu%08o}NCzM!($p3bb*RIlP0J1B$+Zg)rX7uo#>vGc zV4@}WiAfa5?u$Pn%XaiuM5n~D;6CV$HlcKRwmx)lv32e=5TkA>%VluQ3a=d$tZH_^|QdnRDnHuf915^ zVl+vJ<`<5kg+Vz@G<(n}fOnd8kQT=DEy>7=6`%vHabn&$O zRgwi#v$pJ5<`5Yf`YnhLgs*QcK?%9B#e&4bA7c?Ytq-wk-*^3^_D(GN6j48J=l+*8 zE7l`A$tUpJCt2XVBT&4VCo$G3qCbG1a2u))amTybLkb1CooL=je(ItA?#=rWErY z8^-e^>h5P7@x$TyOVP+651(KhZLnItJeSj%$?r}eC-1k-Pb^#>i?C^pe$Xvc89zf$ zKKLhYxssO=l~zF%bzDB-!Y~eVgc)&BH$JjpESP<)F(d6^b7RRzm%vRcj=jcyQKGpM zx=s}S5}EW4o|Kf@g`Mx-%?%5$4VDLvg_np|beyQn4yogf0}m1_r5yHtg;ONG#t)C*36O+dy-MK#|CVJSY%_NI^pY3)~5gdieQ3B;S~Q;F+P(q`j*H&EzdYBHy`b4o%aU6(tR;5r*mg z1v6``M{3%T&OY*lLGr#XkKEkYBd?m(q`x$-bdaxM#NC6}wiCFLz8G)wD#eo${GBCT5)d<~Tjdm03I< zfIjS)%z$>ftv0L#>cvvV5-CX@HLFD@5Yl-ZQ)713B#);tUh;Vw8}G}NjD0=I>~Rv> zJbFau{t=S*k_W+3lohLajh@Wdh1O6`;qjvwTvQxpV@@Y(>|})}Yv8>X`O6F}fZ3|8 z_3mrvOsoy`mC}+(p4^v`IqFEP4CpHpOlmQgj-a|rTXBrp_HOR)o)vptR5Me*;Ia~M zCb=8QR15puUT(BKx62pUuq(|6Of-b!1x&;BTqu}*g=UBf9WyDI9D(@_sh9F9EnRw6m_yuN#r+GE} zjloQ%J3q=R?SWxq`}dOFFPpzKZmf5AvnSz_Wk2$tgsJw|3qDkoDnKg$MWey9`ON*eqOF4Jf}%=&&-Uj;;?4Q zgIiPS_rZY{maX*s|t4vq413$S@94il?pM@AkyIdS(U@GYJ{*ATZl9|cX{+mqINH>{}aUstjbK=M7!tmUfM{ci+rn9Ir~&0nsSP{izOI7^hj z2**j6E%+h?6ULrJd?e_uujJ6m3I7RV>xH9eh`lg7ftwK4*zz6e-7aeOiwiItMq<-k z+$;Yan;HuC)2+Sgt(f{<(AUN!9kz#<;SY$auFiV9RlFtlT}ugU@;wKHHM?56lgetNRC{nf)tNCZ4YuOcdA?6r^3!?X*#ha1)zq2<NTc2TDP-*e`HE2eYfC>-IJ(n5KGp)=w=9M{U2+gdk^X*%4+S%ps{ngG_FYr0X zMs&I}zy!$x3R|MRv1GBem?N@^9Z~A>FJ88Yy!G}ahCi`={lx={9sZafoKhDWrE@C3 zal(5bTn7Yi36xdj3U^LI?M1Q5bNJdsnP^R-shZ+|HON1&g8{7dt<6V2sqMR~q>~Ii z@#T?3p%zcl4XXDFRA0-^i$O)%_fm;wF*JnKeo>=}JZHULQ=?p`6V5CsOQywzAMNDPC)SzCMZoX#I 
zGwEu%CY3Ja_OPE5~Q#WmdTg3ZzFX`>Kk3Ol|=QbbjuX%&I@wpYA@H^>*6A&$x<-|M1zXkv8jl(2T1 zllJRh=|_PZHWmmOq|1mQ9H<-Q^Bn_FQv(FZllyRORPO|4z~6U{fr)M4nMAW@(hTMw zRAY05e9M`YOP{-vwFWoVD~-al$L-cEaZ!D08hMB`Tk!e$C{7`21lP~Y*rg0zA22INwD z$ou(Sx6`0TqJ>uCWy-IIOd54YT z$^Vq#iwM4{7x6!;JvE#&U9`XacYqJX!or~SxIOVfqS@Uu&chVEs!LfRk4dQLWEwXod|d={#U||JJYIC)Ng} z5Ve>Dg}~tR1zFc1;e~D_hA9di8+}Tv>YpBIyGN>m1t&uZ@-j;I4-or{N?04`h?Gsf z$(6%6$HJZ!H^#$UjSG$rr;pli%&EuZIoajJ66zhH=vf>t4+e?i%5FG7&#48yJ|T0P~@J*D#)BsesgMRMa)edpKg3uGnEBo#3!D>-W%U1g%|ZQc_T)QN(T7<56a`%V_U<9e&U`FFxbv23Z3aOC zO^EIS>0@l!AhIw!NY^UQsY3TuL1|57j@QcJ|JqGy16}Z3zF$kuYrqqG&3)tPEJ@dn zcb&1J?;98mcuoT}^}+v|c=8eyMH_!*(lu78FXe!H`$Y#`9cC3(@oRxQ%b2l2x&?Wg z5z~V@>Ux6SpCiIVoI|C|IfQC8sMfn}J*C%3JApb8wBex#JA?$0edAm?NSY9#T}>r1 z_Y3L*q*84gIKZIQ6O7C(p1x|_0LvHaRmE`*(n@$L~WzI4D_n}z~2AB3clH{pPzI==*&Kwg< z<09G`s4tn0fcS69T4+w@O>8%u@lu(cQ&Xiuuk^as_m>_B^=GF_8zC%1u|g;N6W!~+ zSsB00Wa`bT6BdgwA>Wmn!z8`Y-RTKUOx85CzX2m z5~oPW0|_}Zrb>d2`HYU8+2TkW63S|$Yrji&W~I&7cARdPS|4E&@U8~=ww})Gk=>KY z8FF${f~A*5O%jex18hj0x2zlR6xosK)WR@5%cMJcqXCMakw>)-#HG@5E0=N!=n{nh(hOk7rhM!p^6Gw5?d?2kd2;^g7vkLE>X`{_e4JXn3DKgIQtN}B9v z!7Ith=I$Rn%w$6LsRhOaqd+Ihd~SwF|M`ZDe&GHrjdATa^@G&#?6W(C`CJ62vMN}2 zjSRKFFyjWYtB_U=E;h35ob`Q~)-n=G$q(kM>|dLL_ut7P?lYJ23(i+lM@>Wynte62dbnm_B%X(3N8yOX@ zX&Q3T3lel1((}FK2U)wbpPIi~JU+l{b z@@K&uL0vTU8kqcNL6~z)tY(H2sLNXkjMx*Hk~ot!n!g^^C85Os4A*_rJUitsy#E`| z{i-{iJB2$a$ddajgEFiQ$OgEd5tX(nvZqp$43p~hb4l?h_>=h)Z9wN-oOgbrJ_u76 zK9gqhfB9B=$BgHA&}^%cHabCxr(stle%dF+!d^uvs=WOP^btO13LHpYY511kQzNxf z9%m?Nk19`iZfQM!ZuB{VkspuA4L!SDHHBO>Uoo{!7-AF86S3B$8pUN4#H z*s9@?;i2%o1%Jc)q4?k_@QmI?FnImA0-<{)WT=WxOUb)<0cB^EmHv1K7CI)DrQfA>TyM2M>gh}0EiG0$_TA+>+Ixabxvzs zDX=J-;@tzMwUXA+cMl63W$0&`-JEXFR+j+iF(NPP-f8~T(T16(^0{)a-*aT5#cgif zg-wj_(zf;L7YWSGbL3X*nQ0|AM0myP^YWG60^bU~ee_^3ADxz5;uYXFpq5(|I(Bm` zI?G@Ex%ukp+z1I}$;h)wpQD3yyVFK`a@0M~ZDcj2DMpEl%_@dfcqcLYiVsaGhumz= z)^~M_Rzt?CjW$P@N^ddq67XK+RpOGeE>VVQH-TWfX5dll0;jy{_w{uB;cObp9ktP5 zfGiCeTODMA*LJs?(p7HOr}+G?n9amSVKJDlbY8!O`zwhO*{s-MREQX`3HZ;5(5)c7 
zq*o)deD3e4iIs*yi}EIU#)a>Kq@Y4Hk#E;MU2%wQ5Y&#%?XWI*GU+a}RsD6Khx{proam;p#A!Xy zm%TAeXdXSf)=H=PvDhSD)6bLi-1U9e0;z?#`#Nc>%r&t6Uh+k3r5V?uQ9ADy$*7|)Cd}4H0({kR87yI+kcQ7*1?_2QahS_wL z2^S7~+UafhR6}YGwE2yEOG~Ugv^KTUzARYTko$G&Pvd&qINU_R@OylAx{#3{9&=k< zY*AHkT9{`2J?3KTy!#=Qx#Wrme9VS7{r2`|NTy#@(EYF#@G6SKNIpd`K42uTA<(fv7)-uWa7AbVGC|NZ*Zk z;LcI(^_(t$PN|do0p7C;vohPZ3c2N7G9+x&g@w46vyg&oe(mbiIVK z7|Z83=RUuj<9XmSoFM1nsoSHCH}>rx3{0mazA-WREji+YO>Dy5+{VU%R@TU{pEuTN z`UszK(qaoqmDkC49nFnme$dumJ1XW^tm4Y?lz(J=Y14t$GB2oWyz|!ZsZXHETouW* z=e}R&pmrrO%?@IIR3SJxwDx)}UhOT=)*^=>@)$ri;&y0wekMkernYZ5bK=c|%d04E zF^7{-zCHDRnogalB$q-$FL~O-vS2vp-RcH1EKx=X;^sT~msv>eD1w=bLMh>3dH;u} zua1hk`@RN7Lds`Ex^zI0?k<&(Aw;?)q`SLE1x8^20Rc&o5)hC^a+L0pt^ow3L6DC3 zj^E#U|8u#-#l82$KKtx*K7=9P*w??!;%rIA-(-%RJybB|1zkeA*SYSROzlnI*IG0WW%tAGS*Fy%G{mIyr`qMiHf+PBn3z9@%j?5zK?uesLHs)iX`y+`X{a8!wSUkxg z^JCltly1crM^2+Rq?48>f7_9DQQ5l@11cPIHq!CAnlVpAA@l9e5Sb`-V5DR8d|mNX z9uoTRP+y)6=C~LFHNqF^w?Cb3q#6QQNDv#$g>b;)v)!PH5=+UjPaRR?cUFh#430c0 z)C--gSEQ5Fethz4@~t`@!mhjFTHuQk0dh9YuhvTQs%<35;9SY>%gXG*ljSk2_UU<= zYElLdR^)GmzXyjnBozu1wH%`Jqd9T4jB_wk;fO_+LF9x`5yvCQlthr>D_ z&v_5_GCOV27AF#KGxaT}Rm0-MwL+T+X|S)Kjf(xdVdX*l+w$$(x(%{~=~kpHbO}`g zhZ)1?2xn}Z5PA6$z>>oBnJ1F#IqDKgpu$HL{CSX%dKBfQ3Os0Vaa-x5$l<%hk=vGW z8yyT7sHL4BzBx5$!Kx}a(LtG`kZEC|{D)qaQHOS))GdC)x7YAAl-e{{EUp*n+gaiA z=X+#f5Rk-V@Gk6GDaaRopAoV+5iz3av9yXVhQ{(7Demvb5CC=Krdl7KoPwO(+w)WC zqm?~gy9D(U>=O%2mPDE;{jDN^%36?pw84nViZv5Zm|fBy-r*i&fz!$5aQqyVtMA74 zcoyTr~Ave;v`P>Por}CKx|V zESd$n4?4K>2CQ##5Nx2}EhtD-^4$JTt2D{~uU|;_2AOgjx*LPsL>MD(;nJr>cW83C z-H~6r+IfSdVENQ)z*=~(^ZsBIQk(W{=MQVdTMBT0B6k4IXjJ&fKsa@s*#T7XtoA{y zAJb7mXBOpHiVsv!l}-IbRS}U^%=`p}yqA3Z=A>l1)=GEP(~*-bun!#vcb7x zNonR2;ypKo3u%~-*loB2(!KitwnT`QLEn6sRtQjp(my2Ny?SBs@&eF}*J0R-WI0!k zeol7n@S8TzKZ2#?F3c#57sS4kM}PzXhM=KRNGRURN;eNK6?JY9Y>+d|SDo z3VWlChp3&?@2~!cM|w9Af1@82mlK_Q5m5Ht7G|;Y8;nJgTgG*mDr_-8<#3sBo5>m{ zb#w|+5jkOUifxS0{VQaO19$w!>^}B7bw6haMOVD&69(i_l_0d-Um=JBxGu-ngu>4e zTX^Z@vqI0~LMq-VYxT#fOR_9A7<>efeBBU>o+Va(>Bo2(fj5uuP`&<5{Fj^m5g<|4 
z4PylGN)UXxiL1aMTeMX4A+;$AtuyUJx#du3J5rnzR*HW`&`4${Mk^9W4Jp8hRcMQa zSxB+G(}_Gkr-~p>ENsp&&Gxy%;T9c;_^Pe2We#{g`*6$1IhG|gj2Sv||GQ+7bDEG& z+*LO6U|$aCa{V5C``X|SAMZU+@K94RggyTW@8VIY6q{W(SrUlxCs z8ERH;9?Lq`cESo_V)n3Fs96Hm7%j_O?}RUU{1*6k zmG&_>si;TA`-O}2k*s4aLO61}sQ$+?THy+9NlG06+5BZ za1ob$=+CApN|#+P@T-|X7e{~JhdVrk4&Q+aA!ZPs2>q0|8?&q!1@1`}_?WDc|9v;e z9*em=1u!=w5p1jAY2_+)GF~uC#+8}kzxOqD6-_NXApF;a2uRIAJuG$?k`I?+6$wim zfnmhhhKGPfptKaWXrKiUUtD&a?R;3tI0<4G_m)zdEzCj&Jnk#YF!iuY5FE+yY<27U zRT2J5GyjbfnqoDNB0mKl8!I&4s-r%k-LnAqZdh(Um<+Qh0+y#6ttlFVG(_uy2nK?< zeLoiLZCt#_rMN2Y2Ctl3UTdYpGQIWU-@`^IwCSOH)gOaPON?i~%I)~$$zo?Wqo5Ea zpv*#N)*=bJLO;uHWkK*$+Lw6aWDzpGMY@txH^t=|xpSLl8SY-CXr&r=6KJd4$1vnL zf8Kr#poeEGet>Kn$mF|tX&>IwfxAmUw}5&KI&8xlFMQ83F&}yIK>il}x*!xp^JnhNEl=(5X=x-#?cKhBiN&}U*H2iUfFR! z`^}^jtQ6z)9; z8Zo%3aVP9U(jcmO$`5;COSid)SWpF&BuD9VPu$|9C`EQcH((D1OoyV zwbTGf*C;4ZL7(7V<1~ZFkXv2N;^wcAP7medCx;iE7_5i@kQIsG$G+PzK#94!lEE&H zd|%CTM;EZkVuZ$6hvu}IZLpHr%QtC$K_u8bxzs+7qb%y%hc}=ZT=3NEsDShlD(C_(FTSW4_E+t} zGB+abXUgo2#Rj4>K+%*c_ZI4$1PFK)y)Kuom*I?~`P`d34>IP)1l*qc+S<>Q{w0`J zHvq)ge*V#Y!->S^RqWq1yZL55Sy`p}G?I?5=gcC8~@-R&x&-p{?wU^x^B1zHFO4Ioc3C8(NpQ}O>z2@ zuL8okJS?A5v^Z(Q>4Iiw34)@t(;#dgkUH=4am;X^B9R(+02_H&z7rm>p?fqZ#g!*4 zp5z**1!@&9jKZ=DkqNj8m2O8(5G0_`;hPt?_Q=tZ%5Y0tOUxH!9gUx>%DM~7yIc{v z;v0KrXj_J2EF(>Z#YrX67i={C*%?w%5)Gi`{v=+<|I+ZO>x;hQ*V;mUQhPpv4+wzC zm9h^G_IZ}x9o$KkX{q};9|u4cgFs252&JEW(h9rCu-6`)n&4^S;~-k_ubH~K<%Clz zNCwi)4wWqJPjakp7=>?YsU*|CV#OF?pym`gcH-)2J+xf(W7GqfpgF;wZY@Hq-;ar( z{^XfLka(Wg`*+H81@5g>=y^8HV;j`yc{&PPPDG$_(YvU}Kr+O9K#jO1=rwwNhvHpF z{}p;0x=)D#I*1T(&NYTIC(?BC#%W>T;T=!E6S0;&Eqv~WN3*ZJx?^{Q>kXo{fa{V{ zCFsyDbP-RH8Gg-esI01Ms62dC8Z+`pNK!S8Nh0`-Zs&bC9mIqY5_sp8RAn^%Jt@D} z{Z5(hF6(KycXqhFebsmVUZhxMGnGv1M?bv%P=$jq#Q8RvBR$JAmbb`c+zEa!p&G@2 z-04cLy)~pb7fe)$9KLMMmg7&C>hQgZl(K>`c6qM_*O%5`-dPSnZOSO*1WqlEStk%B zDA*BP(bq}9OkXg>SB>oVAX=9xqYkkTC*Z08SYTjzNr;0dl9vln*kTriIt?$H>ihy} z!!DVvbtRnaEO}d(%(+qIMdLB#ud!sEL5b;VIa#|`ID!mkwtV8ilb+T=Y5oR{d 
z?SwX7M=g_yW1q1RPme8o&5eDJ-ZHKVul6Qt6JO~g0X@4$6p|Td0=EHJ=D|GiEG;w( ziL_ac6INP|#0ok#=F=A+*DS9%vAEK>g0>N;Hgby?HeGK+ZE=$EOwC~Gjm}}ku%$#~ ziW5Wp4J;Bc!wtFRm4XDAd=e{77KvXwU1!NSTFg&x|IiMA$opd}oxfY#k2;RSTBu(& z?+lF0%`rl_IjJf?Cg?&;-P*7tcc2AI3Jvbc4rqa1RYVJJnE)9G?fclY^~Rq zIIQ-h(o`Z%q3d^-S>jCe!oJ~;8;{Rvo+;`-S^o%<5m`)%UWHpMYvGIj-wY|HUCT_E zF!zwnrpnd2?XLA+_9+sF8_;r&J;8(_&yy>+N96E;`~~_)^3x+kI-Sc)Nyrms5Rq5d zU?S122q~7O4Ctb-V7!u4rU9W}6dWK)vZH3Y&<3TpC@kQ($v9K{q)m*LbELBJjk3t{ z4NMYpvh@M0$WWa%ft$$JZ}wPE!75ISSSpjBazfT(z0QoItmP9~b!RQry>*F7-$&?LCwLVNi>Uh@RUEPM7MH3Aks}`*Zs8T`AY(Cd{ zro91&P$%WfPpsvdH zL3f~QD15_#ThUzG@>vs2h~QBzLJrqB4f%8L=!Q!SGFI`m*bWf=A~N? z@7tq(8HW6|%Wuj1=7aCgbJiHznox zjsC4~?P)fD@0bxc-uR=+ZcbH$e0-1>slI-2@nJ(tK@I;%^rlRtHtOMs!*h!pWcETV z&oZLJ&AOkxn7oDe;f*rgI9?DcyTHZOk|BQF!f79dc@MJ6s@mRpmNm6-GzxKvH7R%c z!ClBKLAp7emdO+uz>6`0#Bgt6mvtOo{1|?$Q@mM2DaDT)s7z5jTu?4(sg@8^fNQl& zGmy2IeIQMbv%V0h_;K-n?tNe~O6Vg|lv6e)q7bjc2Y&nAl(Er9vkyTmT`i=fF%yBd zUu_*d7p(#xd2ir+1l{V}69tJb`~>h~_i(6it~%;rH8;!%wzP?U55h5QU1E!TmI2C) zA}TA6uVTaEfK9>>k?#j?1ekVT7k!XD!W9%871Bwc6F9wqX_**Nr%*uUP)ImR&t%sQf;52lohh#qEFi5;ymsA@}YM z_uTxy^zmd}5qsMNPH?UMw9(OvlP{U88h^=hYou*&VIt9g%0t(-ARux5zAT8X~~N-)$zh=1c>1x(mtmodyhu}k5R;A z8pdYNvr_p)(z*weWK%Yi%&Hp^9-y~*-zq3Jy1^(d=jWJV{pVs&400#t3Pqkal1vL? zlwJmCgS#EgE>%|syxq+l({^I!zV(zKGi$Apler$bF)rXYv*4|dA1?5bkIop@Z7aMBXbID1}V0Z zeOk*!r$pkG3uIk7hOOW%H=f^ea)oKhPf^v$!sZ?B#5*`${2|70ng{3Oof(`&E#6p*u7S{^w07BFP1so3#!)gkJI7ehG3JfdOR5A8{NBp? zH4W~>+P2SeGhK*>bgc+@PG7uG+c(wTL*6y)y|Ti+;ql-wV3h z0?J16`ZSMPLFM6~qVVNb`mLbkY(T zVX8-u;oOsY?e5cnv*~wB%9cB?qAK%yA%kwG*52?+OcM#Uc%Bq_H#vCXRd^j&)@tjVM zn0lmNsLDEf$32lo+J{jBKbs_UMdfVqWOY8e_uY`S2J8n)P2o<&ab8wyWV&iVy}poF z=B_&64gr*Sk9t8)Fo65^U$xXEJ;iE&qo;+r|K~*QFQo@?y7z%mEDOn1zrJ2^a_&~G z&`Ocr5ib%ux%j~rB3|}}f|?-mh-%X1Zcg=;ucf*bLx%P9+lJg5Ah;~d3GBTqjX{*$ zGIK@K8?(Yi?8j%9`mI&cR`Ur)M60pOR_UNaS!OIgGB#Abl!}O$!RTtw-$#KLTn(F#jmoYEGCBE?^`F zgjrDuyqHv^-#blZGKr%}^2sp(t=Bp7it77#@~Uu7a!f|~MREVhyvG9?o4MRxoom7! 
zKQfB5!(_p%rMq$}Mj%=d%Rf3O1MwAIwksryzWa3)iJNVCU(Nm9n+dEXO$>Df_P@dd z&Pd$URX-#j$2!UB;MCCcOYcj4EB_c*%ROjkWco4Xt_?4WU9CW}Z{7a1GkRbp zjmqu}->psgC?cGDUMA={_t>XP)I+ZQ89>}&`RM8guo!}n8wNVMBYuH5-Aoj`3K9pz z$P2UkgkMJRH>j*_l49;Q6lB+kQQ3kAB~xsh3DZVUjmA|`J;gJ<1MLJdzG(T88__Sw zfBf%FvCOLy>)?S5wZD-;oRN}|ybKZ?g^%%KQSA#j8!5+14As^bVN$Zp1 z4}yEoMxPC5hEjU&xNsgA1jjVi5`-ux4No)l0_R&8Jh(XIUa1{0nq0~>)tYOl$(!}^ z)s|f`YY!TYO#QQVY+Xfm?Jz(bDrz}#wM92t#OoO_O#mEMQdQr|ZF>4?*7u2+jo+8^ z|J*j;VW=6n?MWUKfN~=Ws3L|QO{fNrkcxM~3zH=b;?4UdPnLVe48FxU2hhsZJFWU} zDr?(f51FZoobE&_wbg^cTcvyO5zfx_e)MseZ%RZiG!@tBHZ&63oR7?PXXtDs7AN^v zGmc@KB6RN%-!FEOEcEEsA7Q+CtYb{+?nGubYpGjL=NDCcs*X&IAI93PaLmg&xhGSf>EBsFn49TNcX-N z^y%KuSC2v8N{0oo#r6sVrjuGbI}d_mqp!u{*QqHnF%}=TRj^&Tl8jKQUP}A50gC~y zvr6~0UATV-IfPb<^kU66R&hWsO2SucW!8lIK$#IWAmqohH{C@H*pD4xD8>Fs8FHal zpPullG0LjzX{nzE8#`QkIySe;tDhc3h6>w~v89#2Sb8_e>r~%&>Q^*vR4W;0xPqCr{Sx_GH7AY>CcgPMn3xeeoGSH141vUb%q0szBywaBz zFG7Ctk?t~AUds$Xx=yDUclir)j*frgWVor(Bjm&Iz-_VuqaBrcvOfzmGZ<I{&61wP5N zS14la(~zlx}yF1DT`P0%huBJuX<5{87N2`<`!lvWEBZNEV9 z{56Q_x-Db~&_M&z{URxohr=`*^IHyC-xEhXFwzZ0*$j?1;#n`BCiZehD_TNszCA>{db(s;m)NS<{?YyKnZdN11*y9uFt^8m;6;A<_7fCqISroXSu6#7 zj-JlR!uLAG6Gjd#rBqxm9UZTFq?bv@D<0l@cyF!im?vf-)dp1x@k|TV$PBeXv3FHa zsH=-u5t`Qv{H6_gul46f2C(x0sQc|b3iQhb@bFx2HVQ#tilbf-ORL{?(?j)be(=$k zm$0e3+rkPb;!Qy8IfgbbjNIjF~4;T?kb=2Uxv|Dc((X&alpPR}nn?zjy8 zk@YeMp32Rz1!mRw@xFzq_YI-Kf2RA)TNBG)^h`Fm+FeVu#Ch5Ztd&{}Ft!WEjvWzQ z6LrPdao$xF0_&~d0^e%bZMH;o_lWkIq!v)$Fyz8S6P+^1C=jnJ9$wDK6DQZ z_C6-XdRB&;asJwDN{ntD#|%m}D&m2*>>lD9~=>_BA{%aQj@M6iq+mn7`(R!ziL+C4hVZ6Ln`62r!f z?9G6AVA&m-6njy~lyoi0esNaRh(1Cd8$JtV$*Jz!x2fb^93@$dcMgqW#;5!HB~&ZY zy*)^#RY7lv(8aZM_HS>$cYJS^M+euLkm>NP|F}7S&1y1m&TIFE2MSJ7RLGD#1Z-6Y zL$mS+Rog*6-an&v3*$p1B)p&h)SJ&5ru>bY;yzUIBmDe`@~qQw{`#kp%Y1Pe|FhyL z=H~f_<=>u}Mn3*RQUy^ozsr{#C}g13L!vsEZpMu|4(eYwl#_E(SOJ~HKax4O1jhV^T5cTa{Q%>)bX@TurZGSOqwaCRz!5L04wtSlY|7#0sA*V z+(_pn-OM51zELAe2B+9MhYH-!JEY8st4dn_AcNRV1r128%Lg=w++ogy>s`D2YtQ0i z2uQWNNvl0Od~f|Ne~DJ7@Bg{N>*OPA7HH|>I7s#DjK@$0(`q|-Qa8#g`6yY1p90P^ 
z6wnIZgMjxurhwTy2m2Y>XQ@!GPx;(!SZPvCWnvgSy*3Fv4a5I_HkNr1Jc(IqKr}Z%<4_W=@2fcyG;xhD2)l_kB(|2V9u1ml3z7Jf| zI*TO3Yk2m0@%pwwMKm{NDB1BzCVsq)F#r8gu;64I@tyT43;u>Rkac!wz};jOzLFxh z5ytf#NKYIu?ud@v_-$6u;E7Six1e>S`l=gVbE(~3!FLCf4eo6l%Q1sKjj+vH?Lf-J zc=D}8U)b7U@ku32En|sYRo6v>Wk`sdfEPO!r{fF*@rMuX1N`jiHo*l4OjMA#F#mrD zeB4GnFELDLTe21_hR#7wGS=OmYy-HAHVROzqx)(?V6-i$D-&2r2DX8zDP^nan3<3v zoGMys$3>sZx-P-P{cG0vixcGs8^KrjqVRv&WwBlT!p#MsAiE`=CeV!NdGJ$jF|$PQ zwWG)S`Iheem{P0RY$;G0&-l&{$90m18cAlcjTtI3AXf$=vp_ijeQ zwH0OPk+hsikw=**aH4-gHh?*R3e>6n*}F_0b$P9f8S)cP1t$j}{HJa>Ar-fOg=A_=O0X89_c8r<2#r0Nn)xPsRH(=C`meS%jVA+L(P`vf^I& zIPQIKPJQ>^q9nol4XW{n3@#3(LG3B4v(NQ?-1dHakCsoyzi#^ahPlsi2@>_HDVaJY z$-zO>G|_r09dGI-cvX~11|*U}BKVXw&|qY(fP81q>g&B6zLC~M!Gji&7Qa(JzE;|V z5nL{?+)MpbZ!F#wU@WG7b!m7~@Qd_?dPoJyjB(WU7HFafQ@Oew{Yd(=mAUHs>sM#V zVG51u7Q31*=!ysAlVE4dbQ2a~GWwX&^7|q&JP1IboGRVZZOTFLN7Vc3sn~x7x4?N`lU)f9N|a<1}a<0%GAd(3-nG%hJFH<2zF`tF_f zl1P{A8A#-~1$$a0 z1`nLWzG+;m+hjB8mMpS|xVqdndPIIPk40pZD8pLY->ViNf;b2pai6DfR;-H zf3cpebY3FFb|#!`u>I#20N6#4 zW?#}emJhDXaz32=(E|x8KGWGh>EKx4Gx4ZHc4l>rdyn7TAIvfUpTo0kvds_t<|^>$ z^HU99H&sTfR#?37*SB4tFk#G-iA~YtoUYWe6fbKEx(mEzG|}_q+W1Zj{qAW?_BRGV z+5~>FqFsGHC3FIl@(54-LscYRo{*IP-SP3rS9zNz8w)9u&p1q&iC~-QjMks#));@O4pJ_4H zf8>$be!~%K8|^RgGO<7|#^x+kcukr~4~z-O%>Fyt3S5;3xST{q$WlBLT73F~LHL z*)RHowBJTX;B&sw`E*^PujDJD<)~j>l@1`7eZ3QXHd1Fd_uuIhG>clhZF-2FdEPY~ zc(-eKyJfychvt}PZjim8$v@_?e66r;FB@#4bR9B7*~Mh}yL(|QpRz&~Ao>L<3nLEB zxsxyW{g;4JjRbg|$~-Qb`V6j33A$sM>yKHmxfcv-wt=t#qQFngmB|-dlAO4r0W}so zBG79Q3C0InK7t5h^6qUgWG-SK4wI4f)6n8^z#E|DJ>i10L!_`1h+4r1N;-v4n_irb za?J+?4NvN$C4d}+S)zqOoj$oHXN=wzT1XMM@QVt9bZbmm0>dk=$sDLKFK@taP({z8r1w{!_z13V}D_DeDwq4 zH+X%m{*<1ZXgsuZA-TKrGNMDQtny17N}&!sET&Sr~N64243I`R6y4JB zt^}=&=q8>KmO`c~+_fidqDuEf4(nF`!_58jhHI=h#ATHxbX*B#?XL{0g;+a&xcx2T zJ{%9sI(j-DOm0yd&CNxB7?-x3K;Z2f+{V@?Yt+K~N%MGve7eEzxiG6N*4`bEPi5xk z33rORd{gNN{-b8-oZ{H9JuM^0KM^2Ye_w1S_cKzpP8sX<}x6C?ueBHzU(Ya6V#D* zqZp0LEtrZ2GRq8b_HcOwN5`{c7chEe$lmLbKR5M^4L1@L@`dAyZI1wVsdbq`elct@ 
ztJNCg2-pgz+n^3bfei>+84{LS0RvK)g&Ni#%gd|0wv6Z{w*Gm%i?N=Y@wqQT9S3NO zr+If=ItC92g~&~)f-s^wD?Ux;NP_MI;x&d)7t)) z{jKrfL0U@r^`9U2Be|pQQwUtMm+A*7X1aH9Ufbbqe(cXQZWP^Ro)wS0Ce$_Y_Vz&h zm;(_-KUQ%Ws0}15o>fmXfeHE&3|JF|9b3aI`W49EX!c!GaqR1seqF112iJ$UtWW?V)P8RCD^fHxoez!J!Qlytn9 zMj1+r9OO_&9-8}w_Qz48dB zGa;bpxSP4uXX{q{(IJ3=@G$6az3njg!N*a4iVMd5k`rDuCUCric13^=flj0k&Hxh0 zPh9w-LsSSM_5q8fm-kk+3csh<4`5P4ceG7q9fe01jQhUTc5mE_|M{EBTB4S+(>x-!?bu(6Ed9xN4c5SE-8!CLZ8AIH{TC|3AI4DQ9_Z&%pqsH%aab<&3F zMF!^^{|L+(NK2%qOpt1P(HsKJ(Q#L#ZF1Eg?W^_}RB+f0Ges0FDe2?Cm*S3j?dcAT z7c{bFDp;B8hkV+29aMdaSVihuYLYl&hx1~czv=7hrLmbuBJetS$eN|fx@@V1-7yt- zqs6|;&sdj*@axJNat_^mZ3`P5jqEFAICPWQUwSJ^IzL(3PoM(p!Zi=B!U0ZB6f_$D z9edDNkm`{oQhSq4yb7=uy)XLQ%7XlB zEqG=lf5=_}a?g>+KB$wI*6j;i__f5}nui>D4G8+}=?IT6mf)MOC{%4N9`B9(4-vTp zZa`2176;V((tQQfDgY8vfi&*Dl#=(ZY<=O%Zv-`CzdpKHTA>hG(635L+vQ&6dqKwc-yn&!7V30xgJWK%p}_Bn7- z>k~0I^URdLaSpuXhGN?p(9?B*8UYa`1tS+GD0Jx*dx8T*%%Hd2vD}}d1qpcKI+~)7 zrscK`CCsGRKcsy=?m&qTchI?|IB;oM*WfT1oMON;<-xz)HIFd{`0m9c3PL`vuP>^p zw(m*NRBRfhS=s|MLm?zdC|X6B_ePtk=(4)ThN0q%+oHn2@p{6Dqv1h8-6{mKk~cPc zt8%ScTD+m)l+n5;;ZJtKf5$rgdJS&Iusbr2BEok*DZg?_@jO%PkzuzAv;epc2(*wZK^B>EBE!G5n5ri5+yKbwiomwAayH?AD0#S&}D zspUW$*Qj$!9W8<81rPg39Y2H~2tLDg^qUEsu(EeJ3P_x_P=PZepQq^s348O*P&Q|Z z_$Q94Q%{p}3519)tfNW(s(Bo1QH!e5M)Hs}OSJq;$r0B0`MTiHj1}AscPlJHLHmdh zv~Z0Fe+LOx66TfHn)Zy>UrXic7^VpOPY0zZm&iN()s}{c6`wvb5R&11Sp+_K?I_Pw9w4+ zsuW!c3~(WQQ~t&2KcB?mmaEbJ+r5b0*nHGNtug#jJY7GE$2uu`|3Nj6*jm8B#rNo( zfLFVHe18^5UR{FVtd-G<^tCqhu|@o40-?FdS!pdyqcg5!NQuLkxX)m;B}>Pq+cY>F z@g{ZQIxYRkg6HiaPGaRv9ycL}O6qFzK9GsMKrgiTpiKv}jh?-k@;0Hoo&MtR!y!@R5+PuJkw6(Sg-b=PXe&sJ1d}Iq7rm#oYgX430eX z$$a7}I)8&+k&$?|aUeJ8^q(LhRWH?I7ye!-ukS+SVwASw*p@=?aCK??PxF_R@I{HB zJf`!l+56k4TmJo_X+VpSCLN0FHeA=_pJWDPjNim03Ynb9s_0(n?dfArYRLoE^sxKA zC}Krb1MUsX__}w}J(Bl(y&v)n-6k=*m@SBeYznrSbEwwB}{@C*I~#EJMT|*uGSJM`!0h zL1jlf(D=!8+|^Z5xPxt5iOb~ud=PsiK}b{8OW#x8im!7Yp89+Lug(83RPK8tW^ozV zj|aJ_2MeuPFMu>(L>#17k=23(?f&3*seh~|9yYrPJ3k-F+zJA-68AAZ`p=4&06j|2_O4I93j19 
z-x)HiTJ^Sz%TgZofX^JES43=0A>)lnnM&)OpaOCoOy-_pW3JbLq^DR#0U!WJ0$pUz-|m+uXxy_t~sl+lvVr72Jie7v|MFDp>O6}LJuL1=eSS3^jZRHJht*Y{}(8B zTe>J1(SI#ZP4O!h7{X$I7FOJ6)yZzBy%3$jp!s3dI=HVJkgCYJ4ejD;>iOG)q{EubT1kccAx4`A-%t1fqy9pIs%)Ug{|FYw3K>@*!s2SL#=a;*0xUDPeB6ReVQ3+MY>=U zg}43WWL=YNW`L;brQ<)C@E&PLFktf;Q^jh3;Y^ecg}%5mg6tI14iigLkWU#4>%B1k zxKQf)Ao7Vq8t_6kJEzNgA7@y>ULP>?CkZxCoVgFgd|8#W!dfX71d1%#CTOec-x2o= zDVJj|Vvw=`V9hXorIa*YCvwNW=UbV0$jnLi&eJMlb#y5NU$*A+o83Owr9FTdavf4z zokqEr_1{r~iE5RF->LI0bxQzS(y`35?J_aj8;YDSyoi{S7b3uG);W6E0TZ?lQ*JSt6sKXcSO_4vqKsH z|DgW4DU6Tqm9WJV9I9$N(gLw*yn;k3Tl#qHGiYWF=qDw*Vz;T3Y6A;_xScr!CzE4_IAmFuysbEVF zFgF0|D{`|?XdI@=g-Q$n)HlYFcTVRuHCS^_4;UJ7DzGKaw{Eh)w)2QgowUu(Pe@@ir*AZ*WP{ExP zy%rP{n(@6qi7G|#N4IMRc(Hqdp5d1Lr|o&KR+>!^LY&Rs;jUil+pZ$kV=3F9YS|If zQh9}qG@LxJnj0Zjx8rqmulQi1A_Vp&9l|p>fhoXT=g_@Z^dBd5KW1;Or96xmj9Pi@4YwsMgyJ1ur`UnUmhy_d4oDymh%V`lDeYYGQs1{m&g*EIg{M|@{{ zy!*SL3>~UQdKpkzPmLzqz$RpDUACz+xM$@7By1v-sYyP*i6Qe~KuI-F?JV~4j5t{8 z*mk1wae3zYo6b&8CKqR3>QWc*vDwvVt261foTFbVF&S$m315zj8ZNI?Ot?C8Kg;-MN-BV2cOmK;Q6}KQikg>uoBUn)2{L$#_2q1rnJs=B2 zaMnt!Y`U7-PQ?zGjJS*$jJ+)ZDyq6EpeG-pIlR-RzA&Oz<7wbBov6w{tfp+#S_+PW1}8idxOJO?vDZiP096^b?oaS%&qNZ zGZCPzp>w1!h~AmU1cGkpTD*C&))?vLG_aeJhcE0wE%X!Hv)twpsbC)9%9I1>kx?bw zUFvJy=aNQde%m&(#9q0gLup3Qys|9qcEjeWT;dq90^IA)9*26B_#%e{ zvc9*-ZeS-PSbn+*?Mg56I#El`Ev8t$_1}nsrN@xOVbdYExeN%2Z@KWUsi9!+gnHfJ zzcrtkQJ|T_`8VHMb4}Ook8h04h9Z95mwpxCKfR?uX5ogXl(U)C6sfSBqufz^PMlu# z^$lOYt3}HSev#dY$75WkjScS0Z$LYe+?s!Q`u&-_q_^q*r~QC3jWV&iK#ApOP2v5) zsRJXXl3&u^-lR?w(+syDa=1oZL%*J)T~mqsg8?VP?(2J~@jC(8%YDHw(emY)XP_5A za{JmjbYCUpskfAIeDe~Mz}OkQ=;JOfJ@~`e>ov#%_$(H$lR4he z=d+G|6m)xJoEmqWxl}4of$`_CTphtax_Aiu>y)|o&-QL}jV>2Eo(_2cHeWRW`?hk0 zzxM?{a`;gD>ZPVgy#7(tG3f1`S8_LyO`1E2@=Zl{5yyh31WN}XJJ_kTWhTZi?Vo~2p$%<1}^W(YX|4#ZVy#JIQ zA!`$-bu0_dS1!NTAHS5BqehG(*FlDl}42$dijceUI0wmOdjNw zP8I5k*#*u_@A=2lcF?&wSh`XUAg0;soj@8D;}uFqP-+Z9Ab2vUe+x0JvC54y>( zLo^(9ECNl!xkcW>v?#ZBNws%)5OzOefcnRfry{l7lfZ4#3RQ!L1Zbkfjt@4%W)mveU(79k^m~cks>TZDu@DVk%cqW6%yk# 
zOI&$s;h3(|ZR;Zw(~naJ8Sy&WA)~VH3)ElM^eKyO&m<8$8FTX2#(BbxKX4iKa`UL2 zI91!Yd1z;^{{n_|eBf~!g=NMZT;VLo)f}&kZ&_7Em%hKKmyqdK>PSL{r}ufKZo8=+ zD03FD>mPNyV#Dk^KHf|UlS0gdHH3wdULRj2j{G2QnJ}uRh9=NTuI)WGPg|T&4cj)a zS_{wBoS_lN|1C(oUbuZkjvfGP6xdWlM%Oyp2YMeUn``7GYu+JJ6PPN-7)=uW$kOhn z8=?Ff&NcaLE7&3PMwU=KNP6nWkUyH5w55gsY&nwt*xo2YA$;$=-k;y^zxro9@4e?f&f}c(IOk^kmLqrEBn=sR zp#Y=_%^-oSCq2mH$QApaf1^mZ%&w zri;j@wbN}Ikih=qpLy*epu&*jaaVL!_XhnUV0KmYs!9c?Ww4T zj9AVY1rJ=*bTgs`jPRjnxzOn>l%E{Wu1IWBI#XbiA9K4;`CLk15Hx++f;1Y+Q4@3v zJY_DYv@__d%}cO(e$_eXn-xLG^fCx@vJ5Ch&z^Z3H!^43!pR0tStIj1$g7%PvO&vO zb0)0wz7uV3Oj4KYX-OY!_5WzC>B{{g%?9{Xhe8RhlkO(e@RzyHIFFEk~KRyW~k^j8?M#aA^9xs(07^CHPKp3 z?1YLj^FU$*9R)?EctHbFi}Kw0o0V+-ec&dOHo#}TRrrmk7q17t(?+x?&%&l46~}ib zI=|b$Ff?Xwt8Z&FFGJ6=vlHtUA2lNIxHT2`xX>UX(Vg_R*UcB8wjIA0MTT|pXg_{>`ibYDwF#yqyZ`L7vFQT)};wrvqP zNU7&3^ZoqY$G=;%BETGGZ+FgM0K$k0Q!qjIG9FZ!qjUU#gAt}HY5wrm0h9{<4a*u8Y|9ihe0zKi$%}RxD5%P*!f;JiOn$MxBmX&VUuyZuEEPf43 z_|2e~Iq~_OZUjTcVg-JX6j;mLz?0%KAU3;?(g|#tie26)AOC0Cl|#Fs+Nt_e76`NN zQqEdd1w5&BAX@=GdDa)a1Z&q(0UF2JDE_o-%O8$gtxGf+WR{QhsMSe5{qY|*#^2Gr za^5HVMGt6wiwe7Etb-~}?MO3fDKzYciz??C9hvLk4I*i^uTbXD(SIm^O5Y|TaB!1K z=EGuK!6U3}_^OY%N50g}oJ;@*U>1@*d&VOs3pzP6Udl$e=qB3eC_fd)AX*d|EbYzd z^-Kp36D@KL-3&dW+VXfk%&90O@bmw+2xuZP*S?Q&F<3n!RT3hW+E6!LuZwk2Za#S+ zj3|FR(U2NY>iF0%z_>pRZ|Lg8Z+Es#*`dOjJbG3p#@Q56qs-y$n;m3WJp`0MJZr(9cTXtplijcgK*Jy${$H@KK09G#9iBn zx?~8pEp&$-doZ}-efz`nfS@y|=O`CRyqTZb9W@^K(75#B znGOq(7FG3?Y=Nk2_%HJ|xt#&!!wXj^T+)n=p6Yz{T;l`}CW2|?szvo;wOrfY1#|W$ zWK+`N-j9uPf0Jj$z2OH|GNw3lo^#M-{vKei7P>Z+vQku4pJcj{yNX4N5=%_N)^j)w zbnrh@aY~`_p?<$20`{g=I4|A!?-c<=mOog#@b{vsWh`?fX*96p(5@e-W}VUxX4_MO zoXwqy`qTgntwcc1O7(+h_OT!1E~nk}{PRzk@!|b~D%v#`wE9PYoJQMzw{CXBo&&GH?~hdAfBd5oU_g~`YB$t*Pn_DO^$o0gFi~e-L-|y$9CN| z9=@8(d?~N{fuJHt=iFw@c|>2qZ8J?i{e+3Zf%NvxxQS*Pt$!2Zw?fZ!C)ok8sar)p z#j9u6Wc6&ajE zNDB~%GX|Fv@v5gO{{TghPUoYpv;#Ef zmk@pK>7pFdy@&mjg|2+=5|zevVaT@DvhRVq6ZN>%ch9pyN&1ZPxm`T$<+F!VrbUgC zIN$irEAZVM(9TSd^`_G;YR%7gNG1`n<4w11mjW6|7+lhg6rn{vRi1HR973Lv 
zBCspAS`pTb;gNQJ0Y=bz3;;FDjC31hKwa~0z~B_1=-=iyd@Cg3Ml#Li4cJ9P?p?r% zHZG1;fh-V1e?-bETfi~ew(w(lMwOiX%MFqxu)q|LZCq}Y>SD?hgu}^^0a5Un}!|^?Lo?;ygVxneaC)yBDX{y`>8XA=m*hy4RDv zHXn>~uO_z!p9{uwC7yE#rN7Fyf^AvZ({qgbkvuXPpx!09!v2x7&Om=r6f#3v zHwA!6S)f%hntvi%)C~q3v-kc5&uBdPU^}!PmVHmNyMyHt>CS*PUnX_Ilut z)d@CG=Mn$lbzt75r$5^7fUg<9uUSuS1BzZn)S=+3Yji}d?5-?|71e`-W7@twFA{2l zc6QC(U{H2S^3Nb;`9})4bgGz zOvTN`+1>6Xgg|`BW@)gnBO6!aqiDHtLVcOWUT0k&B4C{2HN)*rYChYdcg;&ocF)8OSJ7+A)DU`yj+1I@Ft`<=dx^;C}Kr07k`{N5IVY{qW$fOXL*4wA|*4K=I=xK&cj(R zs;h^G+C#VKYi$!U5-uf#b9g7z;5Q@;DK+Us!)e2>s*}->Ut_XI`iVqsbyUo%`hI^o zpfona(viRZN0IQ%dZ-RISG^TjUb4RBen3oB4Z3}-X?nWwg<|*Z`{oIqqcw}S30|Xu zZ)AnYkhgA=Lm;(ev;2-?(ze{Mg@R0Qj7VP&g|&Wc-p#s+r-$*1O-AR|A5kORp%h;8 zW(e=TB)SLLlad_c{a>Jcxw&n`8&C>}Ekg(2mopA_%wQSry=2{YY=;p3 z@Ue#+X1ASaWlwq$IP}QY;DUjjs+*MdOPSa^A|Vo5XITTBesY>E6=9C_5MZ|wI8*Vj z0pb#xMh<wBm2dA@tW=`6R+WlW7Av@ja>HK6g zQE-bZ#Gl1aCtW7YKGe&LI(jv(o8;^*C&UOp#HitBNBXJ5+C%!Bdo38ry#pO|jd#8s zwqG_qEGFotu@-(T>UO`68LB+Ul8Nb&|C4-k_ShG}o!S)_g#h0@?ycHfh9Mf0um2WC zv}R5=BV`c@OOka;&~`K2I#db+SzptbJ#MyF>&ev$pElDB-}e=z@p2@ifj}yLwX5F2 z4X5@s81#3}JE36537Jyog~PBe9!L*j82VH_c{)q*w2^|eLC?AicnW$F^0(W}n^|Lv z6$`I=?K-p$eyMuHK1KoA_cqln-8r`@yZVC3E-=Z@@DG1peVwI2$JygM(hx{qgP$9EFFMp|^ks5?yaUxlMr1z_Mj{#5IrZT`mlS6!1_*z&l;^#+^M9rOqh)eoeWfE` z9H6_uq+!{@-Bisl&Nagx;D*8NxYa);dtApl39peO= zN1%9euxH@;_st#cr;zA9@Ac^0T6G|`OBiSOB1(8mVYyT`KumB zXvj=ER}uX2M~LG0l5|#|4gY(Qp{^W#%0K3mWtuF`x;};d`g9c+3pLF0%Q!nNCckmJC;?)idv;Z`rCBNXb1_T8gtTsUL? z`B8(jl5L6qVaMh!O1E|UJU^Z>ht@6Srs}v>;olDYzxQu?B#rjcK8$Cd$GA7uoPPd! zwIoHjO{Jx>V6$Ac*16Vfz|awx%u(A=x@v)o!W4yPC1UGckP3*Lay4; z(;Jp<&~7RM3s3r& zX~Gl;rbyK~5Y@J-0haO}q575noaZk~$lSJ*FlxJeJGXf8p+wv_x}v97tM^ZF7x61S zGH2hiGe-h6D03B7Doz4%Jn)e9Y|HjX%5>CRc{g9@i)pzpz0$tzdY2sQ|Ki;LwjdWn zCmJ^P;SR1Ed7xxuACakrJTONbd(DK(%(sPBD#V^^_Ds&)x#w2r0uP@Sg|7)xK)O(! 
zQIG$&sTeKe$!2)%)cg)ICSPG5=p50?crBX|PGbKL5lB!;rI)jPH+oWyjyfhJky$)% zfKy1s>gP99AH)FogcmKX{Fx=nz+Bd=VP1-^)9c>4bc)Xf4BnmUueU>IfY$W%S%|)) zGY>y>nDGfQU49}KQ!GEx`n0xzNo#9Ke&XRYGRxOFILrghm`lI+?g^!4wwk#^w@lRovw_`S>>Qo1A5o_L1 z-CuwZP8RIdSp5l>(A?Y6BxhlMoD5a5O;@FZLm)|eqT*-%)PELA(7s^pjgwyPX+8i8 zmo=ay;}8A$$lyZgXYWUU(py_ka%KWP1FTiV;<1)8O|qRdWgy zW2*NB;(l7l^c<|6Ch;QLJ|92NkJz~!Dc{FvF%AvmU}8dr-x42pa*96sJgb}%7|2kK7vS7RUEN63EL;p%1Fv*FLL6S5iVg#;1yK>ua z@R8S2mF(Y@30T#Z=_j*u=iK?}r^t5|^4aCHR^aM%jPT_nrq(B4@P+;C<5ZuYNWSvX z(k-2nefi$Lt?Hj+?1ly#gRE34#j|I@51vd=#}4OuRyi0p@4Wq(W;Ctp5aJ&20P*f5 zC+Z%RVvx&8@je4(a>tKHfdys_#G!X8mgem*WD8O zVpp>!1?^ugH;;(oMG*|6eXoVkvNEvK;z&%pu+^Jo3TU$UoLn8HJsi7RX@fg66xii? z?NgaKqaHJ@x;l3iFP7T(l44-vj`Zkqz=SO z**O&=orOxJ!)0;a>D`>nGnaRt&Zcj3`RZ>>0cU@Om222A+zHqGEgr++yB?fkTGhn`i$iedwe z^~2lWSrpqtN(<8UOkFA71r>Z^DDh1UOt@=5gXIn6z(tgJddol{X4%0~80xcURcRql z^MmXYGAHw)?eKopJ$g9K?N{+r%oN9CxW4r?HYv}Hc&-~AjEJ7br|r=KEA8+na6yAI zm@&RjLXybeU{KH#HC!u{`Ze(Bs0z*{;!yS6<^9nPe(_(;26q*q$!!7`p zqe${-XglFsBesw4)0A|GINHiOU*E^+Tikw3t`>J{_{zu5_`Qv#T-mn5-2zU)c4~OV z|Kq}_maQR}$f^o=spRy8v7&1*fxTP7z`OU}UqgU>N~IJ{+6}?uc(fHCK1_c6ni4*J zV7-&=Dm-wr^ue5&K~82nEF&?(H;OGA_BMgfiDnd^8V`rkb<*bQ>T zSt3)WUj{MwhBO48@d%;%de0hhBW3gn@`hv`Y+wV1^v%sYQR-f`h1F>zBN>DGv9oqa zk&H<_v<1}l23{V)`~6Snf=a=1cO2NJaIj5a-fbIZ2J;&;S-m@wW)MNR!BS1nV#-8>6=M2=DYQMo@9zR*FO%9Tewo>w&2 zRqT6Pt4uRri(yQ3Znu$pdCj@Ix-KDOFL3JBgLjJQjw1V5{E}sO(zAAG7wz8yAz)c4 zDMGFqRPdub%m}tpllImCma5?&kayl=fCq+Q?D|5P71eO#_I)p7FuGqawpIw;OXl}W z*qhyLqJs3%5P~8=Y$Y%fEP*$GaS2o2U)VP zz*;|I?;z@PGEK1?WUy8oRt1R^$(}qwD?(2~Z$ltZwunFo=G&*RNG=xq!DOYOFYc|Z zpfKd~d>ZIPVoOo|7pJbQ=6b8?*IwR2xQgUbgKP8FP%95Y)!k{ExKB*;TOL-g+A#6* z>-{i2d}1~7rQFJfLH3q_mQ9>p)v#JinPWHc94eK(-Tg;J5$D|5{N8X!-XFs&xqY3HZ^Xj{vo4-cdwYCh^w=3*gOQ*?g4u>sD*viU^!dS+ZkqAi)nx5z2|Wjgu4N zE?bTWuy#Y{OVue2T!#(?=GXNeUgv{AzV?7vsqC`W@Yg!4#l{}EA6s(|Md)P4eM~Iq zQC2v;_hAN7TE_h?Oy#D@ccsX7*_zQH+aX`{)&@b`YGTg#mxgd@i`BP}Dd?46-xK5W z)+S@?6*03V%X5lK&T_H?Tb$0VUIn|6v}@&`in6?qzW$rVyDHpHHk<*=@BJ@co@YlW 
zCG!(IB=GS_)@1Sun780WvPG~R+aFh6!il=QnuR}MN+=f&=np)+ygBY%bA9MJNK&k_ zcOCmm?vn|);xJYR)0Ks#Usut?otKf0pFlwxwqIaw==^+uLn03pNiZUe073Kn1aO>j z!DGyd*G%T#3Nc(jSlpa?)-i|NnU1H0C_K}0E)`NaRCRE;+Mt>9HZ1T*A@gs$e%io+ z(uwJo3s^7e{^pPaVp`|OobuSm!U(4TyvfD&D65+o2gz@os#)bWR)(u*k7vMnXA%_=Tq*o1!Qyym$c#Y1cC6mvyW1I{#{TON*BZV?&|XhjQM`>TfURZ z$yYQGu6Ze5eAbF2i|V>?PZCeTEL$%1UCJXYE?keBZq(u|J)w^I?*btGXM2A4*aBV{ zFifS+1W8HwEtSor+#CBBAZCcSG`i(|H)KlwSmdznJAV32IsJO3V2!ih(|lX+&`Q8S z^A66DmRs&mun~6T1p&N1H1{jD{W}$`iw@2Z?N)TBnj3t7EImA$M0*}&Ov-bVH&SP? zMkM`NyUPSo^9=wVr9FZvbB`LKojG|&L|%&mVURjv zm|~r8W$``va#+m%T)0)~a}Q+>DiTQ3V1Ve97VSlbEfZ4-FTDrFRO?Bz(Xe69wR0aQ}Gv}@!Awdh-7oj*@o}S zdPhuLLT`aZdkTs$4WZ6%A&!j&u>c(pOM~!}nZ}zG@iM6~NkID?2V?@~{9i zm%?9Xz1Z$9LJ?~I3Th>Rhrp)3kbI+{JxD2lOxeZk!^J$u8#4D9_n#ps>dftaBIu_6 z-n|B?Oq`R6JaX0u)I(+ON?(BLI4}A9{4i0H#=7^btYxUz(c4Sa&DrsJ?SG*@<1eoz zOd`?pVJ>6Y!O#`aqdQmDZ&@eh#o_<>+j$3`8K`f1URd|s5z<$))A!gxb_NQlzPA8=5UTD*4{u4MH}%WKJhAV~ z4X{wbT!JJ?&7yAL+f(~;9wBNZ>&T33H8Rb$`a6>n-q1~sisHxYLF%y>8gvimi}Xcj zoKO*%eO&#A0Uw)p)f8Qgo_bBBn1@4D(R)X&JLT5xjH=c28D&rI3BAB(%mvfk zRjXUyB}eCsJMOq0$hptXUR+~IQAx{pHuerAdZ_c_%bzL^8n?s-D|h-irF@~dH(mPb zIyj9y0ISeEVu$YZ5~ckNiRbYf&UXC6F)7q*;w~Y3R4&yak%@tXMD(5#qKf(J+h5>8L>gWpMV$jWmCtEJBh(DdyV@4DNt<@|0TdLK9rL?a}zl}YY z@p1by1hP#(d(4AyFcL`rhSrm;(}stA$e7HvI1PRPkAD~#PwdF0hnH$VT|uA&06wcZ z^<1;hD7FC#;j^684#x(hXIkk}eg1-#Nb^w;l6KTZXs3{MS)>XDnc}<$HeBiP-~yN_ zfxx{lbUSsjT5`UKx9Ly7ZNN0NX_JG;QmZGv9T=>|0T{%5%qvpMH@iQYS!liaETqhn zAQs55zn`ivU;QkO?UG9pTN?S)`(Hc>t4X8w_o^#R4mE)Fo5h(V96C%DG5_B4y3FB)A3z>(N%OfOeP3g|MU!>8(SyPiOl#MfmU_C; zZbBLZ(|mY@^pc*TXAlPsgzr)tF$(_kB~zwXb7oK&eQCH{^boiGST+JA$t9BO3p3lJ z$&`VeuLGymb^Twt=u+{_IWzeme_vn{vA(&^SsEw(+UF=kP0tGjl?OyO3@$8Gk!0j+JHjyi@;kMnQWfv_-4CrxM`_*K5 z=)LKG*`Wot##gSC2hC3PU%iV->-=>0M%g~%(lel8x2HtGReCfW#+1)3ykR-C% z<6J1WdNeizy{7}?5~3{sf@MV03(kCJW@3iimt@y$sh7#EiMKS7x1w-te5cSJgekZ{AK+?ESgjk0 z9&|PbHYiRC+`t-l8@}1@yn!FhxQxHTa5?So-dQ zQ}T}~;XcOf|2JFOzg^Ex;}D8pu*L8&r|f3_4HLx3==?wO(Mb~F1ktLB^yoA}NLeTH 
zp@kYbr0+#j3OGnRWC%Zn)4cevpw8=xBdDSDFtS6i)MCu$zy=CtgC%927(N&9*iz)m zFEq0wGPEMTI19~4Pd$@xlZlt-v4;5n^!s^lL^RiL_X@bSnYEy96P3>5b5Qy~J7_gh z0bz6f+`U9_FRecdMm&4+=x`ZLCAW8`;og)E|93V3eCtAnefhF)q9>A*yYCM&s1)=_^AwE*;22r!9e?|R~?g1y@9 zgIh{EKeL&d-@Q8bOkqwNEm5o~u@7AE6bA%@>YNrU%V)~;%*EVDCX;Dl^oQeK0v9EV zC`l&ktD+X-wnf&!>Yglt1!s^Fhg|bqn>|)%2o|m0A>Zg7HH%SX{W%Ci4!B=$4aKN* z%H(>jm%SJJa9`Hdjl)P)2SH)-u>hBqJA=)0L}FTq3aA_m6h<^O`y=}8neU+BKa*f1 z46)Q%#ZD-zB3)$bXJhNE`}=vNN~sjwuuJPx7T)2d9*n?x%j3@_L1Ebiz1(G~z%)575Hg$%ef8GK)24QQ z{}j<(7*juWbULS%tYWmDFXJ_QDua3im$l4}za?!qY z>hM$_eR?PQKNE&;22bh&0Fz$40z<{om+n04!eEF-0^ZiY+vnox)!hCWpr42p^vDtlYJK?JymPsNvne*7J8r z9xryK44!$P1hd6pv-OqKaaW|5zBv!Lh;kU*KptQ)a>fD=WRpRHrC^XTz5Wu1mz@;N z+SX&HCEW1ru^ULcDB5%jDM!&~dVqugFNc9hx=u^{{~e@9GwB%b7=d3lp#}l3JweCo zLz#e<_K<&5UPM2`LI&tJc-@P3zKh8m4eukMI&C$D(NZ1q? zk?ZU%si)MwW`qd?2#Wv~D!DiY?RyQf`^M4*v)BzL5YUG2G8kx(?6X+tB#Hi6Z$aL3 zrzD?i91y6`E`Gl%G!fg#jq%*6OId;alYP*fqa zC=O1JWfuU54yttzaQ#=4xpbkq8dT6eE}?idzZ^(fxZkB3bsQQI#6Q(p>KU6hF$qEX zCSqCvIdDKjAVy6#v(~teseLc#up9i~XeY=!U+-PkvDYcVEcQX{ajHU_?noOEN&sd1 zo+Es<3Be=GUQHq2G+A@S%cQeqZi@s}%pjZ2LoisOsy9?YMvQol9_poT0VZgV5VWtTy^W}Z1|YL+3hjFl1r3U%hrhaj=JRInqp=^Vfo?h? 
zL!hL!xq{@lnf8s`@?q}CR|9XKFciih&py^_(VaA~V^+*Q?^47WJeHhBGJ@nRmU4)*h_w<;w|@wn+}F@^M{hBm z;1CD-$kvuz0EMf?IQJ#71E1f-3c_mKN7P)`x-YVA3aewU#*{J1jjqOM7GL?*K9@?b zhJ3DpTR_UfX#s}(T)4w%%l)r@aymIBPhDB`rmRByhpM856Lj`iQ_I6iCMN=3Et^Aw zS5he%gJ5&~gc#Agrr`Kf>zYDY<=H9Wn_l@(tfT1e1C zDKDou%*xrn-ZBNWg+`gU*Yhfgyhp&^k*%V1yRMC89zGNgH*|XM`ftjyJbjdnZ|pg8 zUCrG~yZznceuz6h<0S<{=gRQM7*+T!7N)3#v04V+Hy^(laM-axi)4h4i7$L=Y$=K|ej> zZ08X6Z=(hT-ccyDkLKSGc1EfSP%y1LNA#lkVdUaxf1>CRWEcx5-U<{5NvJl;1Dw;d zuB5!Zmd9=4Ywi-7pKrgyBKZL?^PhR^nL9COdGvaz zI`QrA`2EvN0jMmCg`BnV*`C2*Xwvm#6%J~ zgo)4qE}LHx1Yw8t^C#3WcL%;-@&SGfUiTiO0&9(91{=2j5zsSn2`3r9pYjobIF|+6 zZo#1TWxCwGt;xz=11a2%tb5vb1K+N6VPf~(Z!nxoP78E2zc}zu+hQ&KYzUt^$LOd> z`lm|2@@|pDE6lwC$Nrdl(=YhLWi`wDSGvokfpaQd&#v2DZk;muV8hV`+q_R(KVno; zVR8EeBV^KUEu6FGCDK&iS@JJyIJ%7RP+@qmf$9{sDgAwW{dB7=PfIr|ZTuw4Gtlb* zV;bod}1UYs-`wJPQPl^y!h)KDcOt*-h)uqC8 zwF+?ymPIpX`ehMMt&m8J8pFXY2J{gC=|n_JGJUjnCPqJ+|4%@@8c1J+VhHK^f_MWR z%KiefzI;ZN%2BNh=J*$Uzh5i9B%r&)i5{sKFMlFKZ?rmaY-O zWxTmfW;AakY~|(iaXz|YD@;fLo`x4c2RjS+rmKGl7ra|&_!|?#B5+c{H-mv2m97{B z22VN?jSAX}bAJlv(V0u)1%g_YmGX1dbV9VLPBU8I!Ji#35=xp^aD*)^K}h zkL2fI@=G`?A)X3@9F<_mMDFad1dL075(2&7$zGgJxtao@>EQW$g5B>59_@=8fs)KD z{SxAaT%9EXxJ3h-IDW|Z{FG}?_85of4!Y5cSt|;V4#$$a40U}eANg>u3~wadrGknJ z-+Hg4?U(1psK}SRt?|nUp5|OzDTarLNn;%D2~*$`df*wy`eW=@(4MSWQ-cues=1DQy*H z{HJ3itTDm;>0D!2i}mLn2p?q+sHLk_=jwz%G!wv&*sH}nf7q`onXr+qPd?fQLZR1M zPSFU>aD5xcD@lh~s-Da}?hJZqho*3Xx|I0+csqS2YIB%~>@fz|g zu`bGmjS zLAMMRHFi-Ye)ERvM)Iq|i)IcjdL_%oJ&Vt+TM67-)RVS+Wj{ft4*Q)F!dKEpbVXaM zM?d^qni-EKb9{u1K&xm(Ka}r5&B3Wm0*30|VFbZF36ZH&`kG{x4*%8defVF_{m03l zS#)EG=2}q3Ie1uw(2Ra0*1wt<%qtBklZ4{=V8?W#z{b#Fz@h#rVro|hPk{Xuswx)A zTui;HPuWj>)#``&uruo0(f6Df2}ip9k{)<>&Stq=W;$!bOHxtU&2_0x-cgEl6=tM~ zC|BbJl|Z1#knsxK&zK0%u+APM9vp~MP!dUcMU)fKELodspH*wv=X$#Bv!g>B1IBs{ z(MIKuzBAs6f7Qs*LnozM=ns46R( zuNk``ijXr9NI%$_Rn4b|B*7h$_(8M@SfcKF=sp*y-2PW{u%*p<0+{5K-yr}muf3Vz@7M0_p*x(&R|WILw;FXZUrdZNySCH zpcM>mEM=+VU2Tx?3!#%!qd9|I*PebXec)}SZ@1<8vS4Qzh<&X5O$>Gm 
zZxkClnXMY{MlVVQ>;1s@U*Ui-#kp5dK=|?#Qb#pG(Xul0H8?ZrI;m))saJspj^AGG zb&gi}Lvzh=lbpG9og+$+J~JLXge1wAp!xPey8k&!61P$T?Mq!mK%h9wfelb{2Wz=Z zS_|%rZWf4%|EFc$XIxwlE8jSMT)MkrvNzy6lJcBazx)=y2rTRGx}ldQx?|XlbLh8( z$FcO3C1eMvmNIGgz7|n#Dp*Y*1i2`^{6KxP!^2|89}wy?l*Cv7a&erc#7g&_a@2(l zf~f1qLSQTqKJ^5C^%Vxhks?PDnGGw&o!xmeCO)^>V_`&zp@7^qw1kj`)5F{0(~2pe zXqR!~RmfhQ5GUyz!g8yD6D2G&POeds_A|j{%E@Kra@E$KQqQKC0W>MDJWPBXf5@iweWe46c zR5(4njDY5YH3H?K8Wo?yF$IDk01~EZ?@1YiYOUhM`sX~@%7qa zRE;}mi1$`bbQ0@%2WOHXADulods5cjSyUI3d2q1k3aSlRd0iz00JD5RIX z(SLmtU8Xc~*7j=HL1Fdk>rnN;jLA}r^VMYCduTm=3JCPmS14SWDwl6Sg0)+zZpj}U z%60+f5ZN>xGQ<80hl$VQp1$IAGKZ9+ISnJB+ae(lBDAkf7weKl-$x5OF2RUh{dh=Y_F(pSK!_X2 z;n>rCSU8MU1(=q`$w8fPhS<-5PI#laM(k4Mr@(!~Mhcw(9m>b!K(&*5aK%|{S~B=q zzQYgc9BNzQSDt>jjk|$t9lT8G=*blUM~b2M4jD%ZDI@pn2MAfWSzELFwl*-=z~f(&^Q#+P`AAz(Ijz4@pf1{p#};Q#L8#ZRK66 z#Kk@i<5G2&otf&AOur6vDv9y88uvZ)rACLc>dt=F8)sIDhyHX-$Qk6^J?cuDxK%MX zw161`WOC-`oU(G1Hl=5bz{I5qZz0iy+*htO2Crha;0a6+nh}za?@u?XGXf(nn(P=N zU8kBv#)uba8K!G7mGKhWUwHc9@z z+P`I+WI7Q40dBJ{sl{=Y_i^0dXZfdeQS#Duk|3d@vH;BA8S|w3kRCo?b~-CPXA<{A z-f_G;pHtwg$ZtR3SAHu2dRJ$3(AB~exIye)+>_Kk`i?fn(!?le z`%56+h2TH&!XdUj0IqH@+4>-(sfeth4)Ak)K_ButAomTTzd69OKnVi0Mzl1F zLVpJyX$WdOeD8KWDUa4a$9cJApjcIIsgS5A)Yt%7bC2>WNW3BTJ1J0)$461hU|Z8h z-|+&^CF~ujtKyVQ)@6Uh-dnty_w~lrsa^%6BnT;(h4irzfbfvwYO*6+CWpq|By*%J zbl{ZEC+TUFKS7y8GWJBT0+S+|EJlY&TJt`x$M6~p{FiE3`^CgQ4{9~H{Ns9?g?S`2 zTPkH#DY|{HDe?F~qQA2%0KIqMT+)NYvGo!UtHEZ~eLyLnKVD^?IQdaqcohc@;SMUb zZP!~J_h^S&y|&bxQ_J5E<`uOd?2;LHKe%Jh}kM zdopah(-gB8FQMG3Q!Y&|$KUs5YVlG73G@OJy&uW> z_&G^e!EKui?}F;X2J2HTb?u|`_%w>SZlZ;@s>gHB5deP4zcB8lUM2${iUVY~}WEQJKFxuCQKsW6DvsTuww_t9aQ_a!{N zfm40r<2jhDm)+I*UDL6Hv!8eMm!IGu=Q%4heyOIu^x(mP~V*k{pfi_<+1P33OBY$+1883H`$iU|bm z3gUPD>*7!I@h$Dv3Wx^>`)RBLgj|>TOCNvcjKi4WPWhXPxilU0frs`F-J_eU)6yby z6`TyTF9Z1#BNM>DAkYeg_I;yS>{+Pz=fy{p$;L!t$4mMj5IT60SX1_7kbNJ06w`?5eSNpf*5fH;AWzQvvmK3u7KMg9uh##v6P++lwu@+JhiA6Nvp zS(99LMGov<)2q5fEP}ujgGwlZyv)^GM+uZDK?S0RjeF(=oyQG1iHeJ6B}xmZe;UGe 
zD9a7ZVf#qc6@}6iDJI)HFdBeNlpP;$P6ca{drS-3F+#~?=QAcj3iA@8C4+wQ5%LX2 z_iD1G1D5_F1;k{+8m;&>$teU@7XsYiKHbhsV{=bQn$YsY%uK60K+6}px0omZsijK4 zAotRcz2k1yNnCC#m%s_;%c~Yn@nmDU0IxS)pUcbt>b2r+6yjQ^ZfXDtqW**5ds_?P zdc3>W#_>f{h&ckNmHn>Ii>BhaRQWvg-0m;4;wf}c-fSUrsaWx&+Y(;A9`glyA3l8= z`Ku-YfgE;G7uIdkGVw)KL)D9S2Byqx98&lm05Mcz3ct66kh+_zXyhK5n)vpq?a@iv zxxQ;KuGc=7g`^*$2q!)EkjD>_NkS^1kRyfewXCseHoprkAXZ>nlI`Uinp=vt5j|y z4%l}oZaq_k=BhO-|D^u6_3hkWDH44e|sA$cku*9`8aJ4_u6 zh`WCtrYJ}tft}jHrdYI`X5Bj2{Y30_v=57rbj4N28=y-v_j>e4y+bK+d=yv}$RAnO z?9DcTs%v>SYVPea;gZ0Nn*f6(ti%0yT;|IP#`bG44NNx39k%5QoL*I;U z1QpKwZl-Nb`};hA2TK!>vnahK5cIC_t&U#GR*%R3W9q%*sea%1@s|;AWt388)z~G>N8PXc zzOVJXp4ao1L^zBH`1iF#^ifU03zhnKpUI^aaa)+o18jsg=1anNuBVQvHiq9SO%$^q ze+tU#&W%)JhlzyBL^$u#PXyOXL`2aaw$l9^E38@n_LaJk?m$nO2H>OBSYonZV++%d zU2w%{J`2?x*~(8`uk!B?@AHgQ;aJ|s=TI$4zy0~;Z5vs$nxPt#1L`x8 zyDCrPf@FPGv?{FaW{y}dbDw4#9d(#TF|3@N!FIr~ZeEZ=kl~J+YtBO^#62~-+0RTw z?RSLBlEPGj?IEvtLbAr7e*P|9@d{g?z*^@g%z)&$+Gs;28b{mtdP5CEvD9^ZV*sQi zY2-zqG1(d1DnbxPQQ(boubjXPfly(%_a00eUi<2^RT{YvWrg<(|@SrDFLe@HwYKBO8CDAM22I;T$k|SlNd^doOsF zdy+LKm|c*{sapvCqmV**Nf?t z+q0oVeyn70weqc;s!jespRhwjQmaISLKDzHHvE<;04dKt8dnu{l!D-rhfC$Ao|aW* zuzy;!jZ5sOGRe$-=~}#BGuX-P@YqzOVN= zfwrY2bOg{i^jfKmePIM*NZ#uxbx|d_R?9aY;LTzW`R%)BNcU4u@;%AD);Y-hJyCV& zDrfI+=2Cv5zo_>qs?I*Go%@Gvc-c48tsaZdEcY({fT*RsdVMz1q;jidD4NtGM%iux z^ep{YpKrhngdtsro!w;onE-GcKsl0mrO1 ziP!7}bl9DZ*O$%=ZeP7n7!t#8FH~^hWme7*|E%+MJBegSn_bvP|@uy_|o2)-puT59X=;Ct_wHHdoK7WaxftjO95sTx=KzAK1e&0dtZ{_PH3a0Bq)cp}jhCusX{P?|G4M!NvL|^T z;QR9o3Kha&Sbc&xORoa3_}d*B1+LsqQbAtjHvy1zrPLzf<)Bv=ujzJBxY0ar1x z;k1}_3vM^ykgT)cE5}ee8OnmR?^TZ0e;?qIKIg7!b70SAs%pwbJh#&7{?bY0>}V{c z*!aupR)bg1#3b!_1BJk$qblAzuMZK)QXG|2(sc2N*$TBv?brTs<<@uAR{_uNKiMz$ z@*yyP5};&rBuijAPtg&PlQUM;6|w3nzgSj6jf{E0rnj6n|KspkG-jPmNy5`{Aa;W` z=Wpx%qGupXV zF*5#}I*(ka+s&^FxD{&Mf1|$c{!lpElG%P!u%i+(%&q4B1Mw2NZf-SKr?lHd?4lLq2dK@$wi6S#9h*JaO`E zO#HzWhuIzt*4dG3L@~$C=Q2%OH@z4Uc>)wZmC8cKD>#{xiByW~I$+Y-7-VpGhFHpn zn)D`GpXB2}i45GXekb~p&je?nFERn1tBV4{JMkyXcwUf+|HbG9o+G4lb@vj(%B 
z8gZu}Lf=qF{i?GSuE(*H6hj(?n=xz7>S$iG6=ouB*A7+xc=Nop^z5TF@kG1|+^IUE zjRqa^*{&~XKqG2k2hO73ErLp%*CJwlG=sK9300>kXNPoyzSiwHVqXoRVZYXPFt^+I zr-PK2=7P^iGVP7&sg@6Q8y;mU3DA|46nEStN!Da&5LGlUomjGPTHKE;PHO?gU0<2= zysJtzt+t1TV!PY)l_&5BStNR_3y4<@KHCaXeJAN-9Gvo!CR!5ja$W|L{@fX|0*RYi z{&&6G-Sr*a`R|akNoh>D;5)2oRVNvynpTqW7`RuDWE7kQCM5*aOS9nfKif4V@*U3R zqw@J~i0P!A7@`>*|BL3qe7v>5`FzHQ?}rg;LvA-~KU@-PD*1&`YY%s&BE zH8LAf7cNs$)0Q~cHPcHj2+X3ad>wn(+j@b`Tg_NHyXSj$PAmj8-{Y;Fgw@;qP|FZy zv=u1g?g*Z|t^oA#x|QHg(;S`)@ieEj6$Fbqu5 zY^Dw^>NIk?H74VI&MmKqX9d*qI?~VrlMqkE#nw5sXN=y1$^h zFe6kOn<8M*>g0GwWYCo#931k~FPGM_U17AR$^W5?Ft+p42C@Abh_=uO1@zw-O7|zv zE1j&jyR81)sCmUNY@={!XU<%pdzV25hhZ@C`QnZt z{Yd@v`rY`L<7JcvWchY>U6yi|{nuuYaEa3qsEbh@Q;omeJDObK+dQF|KiQ_3#Yb_C zmp0{y3@*>p$DxOF_VK^N3I9|NUbRYv;b#x%4Un<`q`Z_4Q%a|2SaBVR{540TxsFoV zX@1v+lXW#+JR;^ada!5I#K8ovC`(^h0yiIfci@>)X37 zDkl18_cI)=dop7&_nU)rpr1p@-RS<;EyWG5bDjC@X4jn9elXqqB?)9egm4Ik@EK)0 zU;aCuPX1;;E`SCC*xyI6S&QF)t^eDPfO4UcaKHJc$0uDJpkcvke~cAzTfZPPb6^V= z?{lXiJCOr~csy=u3dHH_-LgmwgV`q_Dd3HYzcy2V%)FU8C;Aw#qem+i)8XRo;r5>& z?xbe0d4Uwsrge=6u@Bbw3+}(Jen7ive-)2+{i8>Jt!`}%TseOCj?Z%DWywQ0<&+A) z-if*+T`T=#PB%o05*gFh4S{?@Q|{`t$$~qp^U@_A?TY-K zZSs7G=M?g}rbhO2Y}r5mOG=z~F!H7ku@`)7$)haz2e5=J98&V}>A%=NW{dgK3g=MwmS-KgNOR!PQ4vj-vf9BNn+!^`eRIJ;rH%ni=kI6#n2gmi zIDO@sJILHTrj!((&zDp3icv#gY|k_kRVLwa3lVVjS6{WWHT?bbEcLrD0s(5=i;X#% zO}?9JT@s~>aZk>WFVYw$A~Mw|uQlz*SysnETiCo$yO^mzQQe9ev~?BHv=( z!?s}T0kC)oX$G*jn9idUJMz*<@wzhTSfFssO<)or1D(JzoqU4T>3Ct@hI_nnS|xc+ zR8BdChDYIb(J7aD4N5ug_#-)vfJBM!CD&8++$w|GTpFYkksQkr!GbO3V|cv#mPxPT z_~#+3xlz``Zxoy4;FSTwYf5BKVt-2VnJ~$rzQicbKq5?5qM2$-5YPPpNrv#M`g^pQ zlLVfL9eoTB5Ws^h$l{JI6=}o&dJ97+jdKu;9YJsvJPhGx^+fPw$nUpVeaJ=4Si0Ct zI^?_FC2~j6t-MbD8e-Aseb`3XX*GXJRyk~}SpBSUu}R{39drN8KFgAxT*jN!;pvT{ zUVy}mMm43o)jr3Qw?K_zW&TiMnMs34x)2XEvpEG_b{;Sfjh1qEHCH=_t(YFx;ePgg z642nDrmZ+faOH=q`PH3vlWAKl$91TCJv$%Dw9Ow`n-h?U?F|1^3pI~_%vN(6cvFLH zLuy3r&FS;Z7FES?%Mrr4KUCa^>x~;X7n+D3Fo4{#pZrKk?`p51qbKLa0;6Iz(XJw9 
z{iSI2F}I&Rn=#97Q)}1V`nh#?3}sds5ZAAFpQ_DEp1j_#%khQ$G*gh-Y?GZ~^|4GzB^yA8kZltADgY5%J^N0EZezBe z8paSbSnZffA64FAtoy`I36w@!(t8-?W_;<8aMEx#ZHw-*T*a$IG=SZzO*)Raet&;| zvr|C%3K?$YZXG`iV|V%W!-xlypiTU+h@Zl(v5Gp2$XH8#1qJV%(pZ{&YB}w0cO={r zCe|g`#jcl)FzbexfLzzTwUCap32G{}zS9=hwv7D)!bg*5f>deuZ%zejChtO3|GFA( zEZ=zJmI33!yJsS(5>Jcet_k@JhxGk5tG!J*Nmd2#uRT0Y{=9%-IGLlvXFl#!42a!x zcAxzSHjf7j>C-&?BS@#n@>88>yX9@Qk-SCtk9Pg0FuS1}bIra(GF=m}(z$xdf%g#x z=~I>Fh7Bp1{^CE_OZI|Xw_Pq^qy44?Cokm!}=W{IyUs8B)lT)A>5kzVMX6#X(}X1J;@a zO!?!a^TItF69Gj2OVICMJ|#dk_*w|lS*6*ym4TXUG)j8zHB?jjlm74CxAJ_zR$Vgq z6}=x@taX9&cygZLjOEIXdwv7wPZiqPREDz9{Ip3kmNu-CL-%2OIS&`cs`6xhl||S2 zm;LSC(=_4rE5#FX(`Lo}kCHY6nD+WYeRahL%9vI-nWG)Do}VGju zH(tqf_z5ZsU;B}#qa#!aLh5C^k}ot1ll->B)Io}z(f*(zJY=9Ij9Wc zssU=^i}-bR|Ega8*A;I#0zMF(p+Wpm`oD7`K8)M#uEDnbJ<7`6n|)=$OVL9|+30Q# zj2In`khd>?FE(z)xUu1Haz5K{^Czyl5FyYuq)=O*HZA-*;Tm$uYb$$DX53(R!IvE5 zT~cw0neLjZLiT)1W^*Y;IR30@Y??lflSG>xMnPRi{+jHaLzto`B`U7&&xnD@xmTYK$ zxeNDrx#ark&Xw|PZN(;s&&A@ipPwb`i;<-cXCEwH#qvWcq~9HH8IWO~!V(2e_pLS* zH2k(*S5H|5A3FCmH&g%P#*^E-6sJ2X{qZil1luG*H~$7~^R1?yxHPZWfz9 z@Hm^e}`S`OjubdN-v4KoQmH#eiCc@?X;QC7{0(Kr@y0C zCyYnR>1dibk=%8x-99d7yRSMM}!a6y$bG#zU1p3wNbf`EC|KQypd{H zFXhdd!C_}H-bZu38U>qPn~Udt8obwY-`D0+8M-}OzXJ6)!4-Us1gw5>`6^s-0%4q;_}L(L{s?BAC$%H32mPjW2<$2_^{2}R;Et3&fQO9p8D^6 zvWf_q}-jFs_u~0O>XE@@iEEE6|6mCtMn}r-a+Q6 zx8{L6;qfp*^!mX@!Fa#yOaAGPo^-A0pGukBI2f;HI{X&g3_G44dW!Clp$9*tZK;=$ zNm{3cVdBHAjuRJJYl`Q4%1MujR9s%oudK-+ELSB10rlingc=E7J3_b#&0JgAhitB1`|$=f>su+CPyR%mr2WX? 
zoz<|F);kp5q<>OsDdfuM_QYs0`-?2*m4fj9VntR$%ShBP0{a+)1Wu?C2fj9WVl7cO|8BP+XVK5+{=TMa$4?zkJ$6^@@hM97 znZel1^*Ddv)1v-H#s?k4U*hi=x^YB+68B^uTU*zUemzKg#I}8DG=t>_B1|HbG~-1P zZ1HXZO_s9hMQ8oZ%gX^!TSgW&@4?KAZ*QGs>x;qTLIr~-Z0DDQyqkEw+_s6=HeDKN z1@0)Tu3h_D)7ID&3(&5-#|hSd1*lrq=>7tpmFD-7WDdpp4ABEaBmXdMT!i8Q@)kfv zYmq?-S_H>9)Ype+auM+mx1h>}sZ?z8(1YN7K0bw`ES09GUWQ_rT3=)eXLC7AXG5w&=BR3*a zyU0FRu{UDLXT>FHfIq8v8n;5=B=#a-=R@P?&CP8es`(@>KT=e_~GJ{#}n3dgaJq0TtnVTj7wOq`u=D}GZV0sdDjXdQRPB}jmq5D z)QXqpZP?&Ntorjr!Kf4#%5VP$ih@4BloKWAotm#^{7IyfQ-m_>U8WEp$X47qnhw)|qx2U znm~E%y!!U}M=qy2l3Z;dM3qSOy{1R@biO=h zNWa~3Q9aA-oO^u+XnX8jy!mN6BwxWukJokFZKO!B<1Lp1=jqzRIzrFBm%*AhG3c5H z^l$6Mw6+|w)BKY`o7QGm|6S37d=xZB+AwMlB(jGU1Ye;gU#$%_LV6>NQHxJ*bw)}2 zpv=lA1hmLMR7VMw*K>Zo!-B-C<;q75lA(XPu+Q0&`J5s(=Q!ySXFuj_Mznl7z^WNM z?f=;^r#G7FB0}&RHP2M7EXnB+`?MR7*)NVtqnv0Kdo41fd}}^ra*U%dCp#Oaa9%KI z$c-Dq^)OjcnY5=v)V90%)rFbT(-NsH&H)>)NbkPhbbJ4IROxap1ZB2I|7iT1O3FBq z=yq={TRyxI248huNf1@PfX3q4hM{gTG%Y{^CTP?~9vC#6CzjrKtJ8^rLRwb;YoJBn zYm zd#mNQL~-L*7BRj6tP(@@%ub5oXQ*!C05I~ept^#AT#3Lk0H6~#9EH7&oj;m7A4wML z5F;;nKmT20PcjTh)qM|XuVw*t*6WMue^Z^wP)OHCT1ew;-OIk!FjDUKyf_?@E~xV# z(wR8dNx2nuKARaLq8)>02 z2&iJL(^7pTO<|6heMry|tWrpKvF9m5@kCJ-`-xwRe*ij4mO;vkpM1%#2k6xr3(Wf#peR)==!g9q4l+7&y8@#=!`TK zAxbcW9)pRHi6J_IGovbkcr{ZKm#9!Ts0I<4b@`rn`@{e2TBg&qgt(UD==o% z79S&~xV#PYK)?vZX7Lgi_=ywp(^aC9uf$HGu_ngfvn{6Iwfgm_&Jilr3 zc_YI^Wf7@$A@;2i`x2&zdni3XR(o5c_uj^L8J=%EZ_S#5&L&>LG=&Ygbx4m!0!Q+V zLBBOqX4G_L=&;L*OS~i!Ua2Gcp-yw3UzFMCFkTeI+xo=ke>x=qwV~N37{~4Am$6P3 z?VThJa83@E-%T%k*#P0^#zNC1irBG8v$%>g0crlS}Y6%O_mJ6BjF1Otxe|G zQqSs)kflOrUFvdXxIueWuWA|-NatFs=4?3yLhJk2d#{$!)m`x)FP+BkT~vn>_S?G$ zea95N&wjX?#7N=TFA&;Tk=OB=zim0ZNX_aJH$Uh%eK@Ry4(Ij-q%J@2B{aGpQ2$XQ z(kI&Vi+qhKt+xdj%pbddP zhq|Lx+2v{tdyu=dI0s^xHCl-_$LG_xWY5=^cgcSJxq~13oQXEg<6-_~3n5 zb*nG8Bb3DACq*jYwA7?L^sdfA!p+?7XHKYG`LJj5X~eUxiEIDz`{w*F-G&bqSF#tp zdUs#d4k`z~k@@obGUrU4tZN>|bxINqULt7l`lx%mn^S&`wnJJWZ7 z-Gj0--@OskAKfkuRWlqRk`4Evb ztn7YV@zA#c@2K^&rqSzPN@M77Mr&z%L``f+qZ~|F>X!2XBw?d^?McNXA19zVD37?W&M{Oeo%+YDgtE? 
z{Ye*Lb)}${fMn=%139@ImhF?TMvbWHt}2Mnew>7B3#5l1+Q(Vvqj9`ORM8!<+92Tg zsr5FUE0ufvlf2JYEV%EYy47sp8{CE2_07cF9@qC*mG{Ud=N);P*{5HG-F6Dm0=&1r zD8EiP>y7>%kQ_ckWAF8~eb%4NJ|;;e$azC*>t}e*^CPkS9(ke&9yKVR-JZ7e>~ksL zd7t^V_(4mdk_^|@#H5Ie4tR`MT=*UK6xn->sXuIREBrJq8yW_BL}coBWb$fV)w&WS z;NhGgGV(VeEg46}Co+PzknWqa@;RrZ4gZH0a;vhcs|dnT13EY~yDCC$iY3k`2&!Xd zAC+|)tNVWIH_{vI8>yRJ|2v|8%LCgn^=On+6O_=+y1&fGEeM3>_Tz06`Bgso52jx7 zo&AprNlgy=^7E?7V~XsqI0&8ks;@qR5%`{C5-yhMgQ|d|E*UH@)DF5ullnW((ob;S zkt#W)b%+2r)qA+5}$uHWO-_wQBgUZ~JewiSt zyYPe-$bT{NJ%5x$%eX5+Jm)uQX>7)3&+BD|4t&WcKAveKDz7?_RSL_c?zH zGOV7WCqi2T$O?8s06AITd@GDzv>BEd3cfOhMIS+O%ps3gi$DG02tqij1;=t6y_JZ0 zEB$f8i82s*H$wj<=Uh|J={R&V{Eu{5C`axcROJag5V!1CiF)65%`kPvg4wIPVh>|J6I=z z+5h${K>>}l{|6}WaRh&J>j=fyyOcwT0+2g&IP^w{z%A`>Z$fpV9vST0&&v6AISQLF z?1FhgWml6e`BJhtORdh5OuqSMhmPxx@v9B-n59FbeUy4~=^XblU z!E9%`2cOMOpP3uO2pI_m#AI;%(4!Aosk5UYU^0>r3mi&?S2GS;Q6S=JQDZhI;5oth ze3N5A!ey`XQ~5Z7b%WI2p_&pwsiQTV3irX_y~=+v;~vL{j6bfvv+bj!b*+5^OAOX1 z7>WF`Ss#6LpOgs?C-tS`^D|Bu2@HT*#m(%lfzYZPBX;iC$#1t z1YJY`P^Ml9Ehb6mA}3ui>Q4GeV8}F%EtKi@tB6|YDM&3dufzS|6XL{%X9hj^-zNlI z<(HaE2+0pY_d5);cx>a8zuf2_Zf6{QC|D)+*Zq7~@NRtLMeLq; zuJ3kf8gEn#Ey06 zznh|snkEhWA<`0qb~qFPGYz!CG)XS|@rr40PnO2)8QH15k5LjRaDc}rYAOpsC64DT zN1q zpIg@Ou#+pwFx%v3#O(~sMr+;jVmq~*FSD)4Vee}`oKJ*Zza3?ry38y{6@CXaE@4pw zfr*_a%+h%VM2!bU-QiEzZ+<%%w8jp12`LwV%KFgJYR+Td)Ob#Do(R(*Tg})66g`so z_Uv1A6rr;iRerk++kDx0;9d$F{%2s$ctO^j<wNeh-`%6kM_!SzyWVBWOF%|U2hEBfw6=00E`Y*G9kyBF7X9|-+CI5~Gv?n& zE0tCYR`H-b>Tiw*x!LHCo^nObFs_Z(`MjgCz z#4{5-IG56neX4Zx$d5gH(qp-T0E<4Awu>u|tv-%3aXVBEvg&#s|1o{^7NXYFb0BLk zpfE>V?8lA#%*t1n77G@4LwZ(1-&e`9y%MGj)DA849vFBNdV2BNnfxp9xuS*qGR4?! 
zL7~-CW01Wvp#3^TZTScN#QGT<<72Q$1}|Xcqf0$OM1t&{G)>_*=napo-+3iJ)2qfu z1@C11(TmwMXH#Vi2yq~xL+pj~zW#1LnkE)Cw|0nw7g^ZI8^7)lCEVpg^ZI35zM+Jh9P9ma4jiR>@^Ak{W#IzdyQ&@vPKu}61{Iq&wY%soFIrPCh z9yQ}rFi0$JTAM{GsvQa=U*OF&`MAQVsZsW3MwL7nvk{_La)wd?5GfmJM(B4DA9<#p+x#c;om$6#mU`)L7yuk{`&ylA``siLO?s|o-;CnpzvL}`2tV7n=h`li( z?!34a?yK0oD2t&*?S=Z{?io`fgo8{( z19&C{e&j@<{sRe5_6m?grZ~_dPJF-n1zeln{}m=AeE`i5q2-kTfaJ>>>p4U<8F21gsb9&0G67UW<6Yg{5<#eTH1MntQ^!He7(?hpA(4M0Gl5t`6rdZp2y zriRgbt#mC@6mE-kBPpoS^rHu{w77jcfepRa_)wG7ok8H8AC?Pr!Vpv^TnzagpUZXUOhp-Jg^!bnMri(sF46AeRo_m%1*AozqfZDkOs)Up6yx2qC^TRt0| z$T!@$wR0i(IK+MiRPCkK0QO%k?tiv$5@qt(RY_P6UX;jSHX)T=a@Zv%0oQ-~hK|+~ ziN`b+(5ZVYC*o;g)P@6bknNVtm}Swy<@p^QeuzSP^L}RQ?aMK$pIo+K@J)FsS9`c> zV4UJ$Gx{iq4|>r)KWm4I9A0F;>V?U-rZdZrSMF>w3=LU@mHF0dnf>u!Rfr4jx%zp6 zj>|t$*x}u=-Q@Jbq)8&uH;v(eH(>l;9$-LbFDCvgpd264-mm$09t;1*o#70S!U*sG zokeXwr{CJl(HUlSzI_sxEg6q7ADuj?^^xn@hUeG>J0;ZjWc?HVZtKa%m3KG|SBHNA z44e>wqXxB-#@PyDE;BJ$qgPVgn}O+ z4PAa>Q}LLI;_+=IJNxiZpPyyldS*)gbdSMypxmv$ApN;yK7Ce|%kbzl$EgLZO}NFwP0Od$QiQ8wsg@) z$uD#5*A11UCT~tW$j%X7c-_6X-1PSzNxUe6jr&{EihP9-3AuO@=7d~Cbdh2+0d=P; z!oPxyF^TV^!NUOqK+A876YxwNh@&3V>;SC2o%DjJQqGD7xWVDP*V8L=TPZdFk(4nT z<};JEFGL4yV{&ce=BnI8JMn!hl^O3vxr}AFu|o>?>YFe7Bo1)tW?aggO9QdDs`Ja| zGGd8I;6uKsS+brFexnoo=E9-KAmeLEG5Fr-V>Mee$VWQmND6h6o5l8(0+-;ohTaBbg(kPq zOE95R2)!rzq0L(#;3>CDy|*S51A@iF`r5IKI0^?0w3 zqThfrP4w3i^M)3A0zp1B7jU*Dp@1R`|S)x{zk&D#u)T50oMcvVN(C%0d;Sf zM7^nPI~wR!%Zmr#WC;{GABnAAz%&(^TmKL9o|pFg-TD<$i$HEg!L5_7dD-3HJ&Ah; z3?=xZR(o-8!Lk~lzW24Y9ZO65&7i+3r1v=ycUQH#KUx=0LjWD)#S z&bmB8jdZ_9yLfr0gX2lJF0D=0m7yL|4Arx$y)fUBjU`_i)lWG3%U%sLTHMj-fAWPs zErJ>S+^yD8)BF6}w0ZwayWK4dE3-`1)6jWm?@BNQQlz!-TWsH0?;*v2SEMA@@u#=W zerWE`?3h`{O6Bi1#CkjxW}Q-EwPV}HLe{d$PgSlh2<&nRv~5#Plw!q@yD;vcgs&EJ z+KyQ*Wp$4aY~-icVHiX}%->{L71nuHW;~%Lhy&_=)>7VGRO^!2+9X5PdhFcAm2|6* zU_oyl`hjdC#W1e%T%_Y7*8zb3tD}+dS!E~t-@jd@38fwk8=G}(p;b+JuZ|9RKB}!# z1nR&C?${+Ax8woyvbA4Vld>ZPomkpsugg4ac$|(6cY^`7&*LGlWB_T-*a}m|jXwmA zUBtfOPg>AMWy#L92RRok3C$=;dat&!&k?CR_oKdxY>&PI2jqYU4dq6@Wn3G2MTj&c 
zcx@CA=j_l8cCY&T{G-~oO#?;6_kI0yen{P+{IDM7_Q~P<2+yzKX%)ab8QXXG|=CS||~Yh~B^VBIzA0oQFSe#?Tg7oS^ErK-XX^61RER9{e|?wTPuUdGCe}N@Hh3+Td{V1^tC1{jnL}`TAm9Mi^WT9Mt)C8NLR|IyCvZeF2+jFkX|S=Y|2z-$H7^pR&bY=JP~ z2x=(<|2u#P&oP7iB%vypmmp}MHY|uH#MlE9lbYH8%2mkf+wP`sd&eq@A;UE&H>vRb z98sdQ?bQ}^ClmbAR0l7Aq+#It?29X=_(ZMqeSvrW1{44YhE6UtQp>n)*Q~e%Bk;Z^ zB+sf0@R+}+vitC+XaPIeelcO^+qqlniws$)U|7<_2PUHD$r(CL%-PdRS*}4j>_w~{ z${a>`QMp9ER~RbutAeOlj#_e4Gz{8&iR>Bvzn_s|nDp4rz7Twq<-)h$8g(M#bJ|KOM-)4@)LJt-3uZt`$>u#WW}mr%I(Pkk{!c5mwy?|Jgr#Ra(n#oS7V zVVASfvc#$13O$MFdl5WcjFh7MS4SO!x!y{#@4N5woU}!xDqgk3ffQ4?>nb#^;i-cb zB@Q8VVZTI}!^y%--Mp9neYhs6k{2>g(N8BRrZ|u~4u5E+i{i%s$ife`Y;T2QNX|2; zzsp{p$P}TZSng)r0>axniT)sKw(&9c_pX1G9|?ulJwc}{CeQN1R8zEY#&+UY%K zlRw}h!+xv&1U1Pa1(6V};iVH`${^m!G&X*)KPUV2p9AQQAD(QDHVe7C6?Q-xDI_*w zerO*l_ee?POPWI$%as_u#^Y*_hUd&KB-{-aJa{Ow76m+JIRo%8(Z8Jcy8YT=lds2g zjMk$p_Zkw82!>X@UEi#`wo4k8oy&XnH{*!}xuagJzDjC3LMIQk06hqGh_tuhA}Yg4AInHlf61eKywEd~Uj?)k6z$3I zm{aw=q3?1__zCGUMSjg-*DsPxiT1joAEzUNu3-3FN`r=}}pCr20>Y5}Iy|JKOP|NmQK{!wOmRVgX62^nJVL(~2Yvo3>e z>;9itl~&SB1`RXx-%4cjewnZ@WPy_6NQEK6E7SocSF7zq91Zy>ae_ADSLJWLauB9& z@+0MFLyOLQ@AJW-*GH^1^f4d$u;HIRUm?G6L=FNw?H;M=1XYQQNBg^c1GKjlzA-5H zBg#TsoL@Z;kVEhhy{R0V{~<1IA(7BR2GxXrTRD?(2sU=-ZbFY5kqRloOgy9>9G8YT&cXm| zy>PpGdanP@DmoX<4F}KhB8{X8fXnmll2b03kZc|KA50g1#esylPNzG~glQlrFV~V> zDA0zSF%Y3je^RdB0(L)#V zhqtMam!6L)YV>;7W-@DD;LxUY8SDJ2WkU_St917FNO(B2j`APQPmZCSlO7%*6Q&7g z_x3$f9#$Lm>+kpRfYd8~!?iy`Hiw7T?|78u*h{V$w#DRG7#SH|v}}oj*W&Z+w7$oD zKCs}Sk;E*ZYoxaC{K3SD#Cl3-#aj38-50T*EeDA`Y2#@!Tl4jsG5ZXNA}G-nER;Ng1XOKS#_aj}=4KX|jB zxP0NS-Q;C`POBqco#*!yeel=&zQ_=_+$W~`Gvv?sSo#Dty<2VC8x~$o;&|uXpB_f@ zY0ydTOMh%|P>jN_3&i@)i3=ft7EdZfL{%LM+vksM{yZq6VGtcFob1c_B>Zhjk-Yyi zhDzoYg$NRBQ!6c*?uT~fLU1?(iD@n;dx29{eKggb8fVsoV_2Ntztk z`f@mz;g7qqqTBB6X^Yrran}EhdC-mDQxUK~DT5ucIP~M<6fT7z-^n9fSaL5zZN!d6 zM*UilL7vLsq9Jv*3j-`&k8a`y9ge4aEs{tw_`fTEJ_O(4FMawJ*}dgJz@+`JeN~(~ zAB7EZ+@-JCLib%-XA4UjIFv1KI@5F^^4-5B8nbkBu2^v13~=)AejL<)XG@&vxbpWa 
zU+?2D+a|RMg&uI#464)mR4QG;aYzK&xvc3X#SzHjl_QFACk>^;(p04y>bVC5-Otc( z)V?xt>yd{;&)cL6vxn6!7#ue38=#WlfdFV6E4pCK>L3okQO* zaIV;}c6)VB+))lNb}4%#NS~tTVqxmOY{5P7WT*Pp`I|yAm(n%J+JSL!=Kc)~VvD)L zgqxH9g$tj)9(AAXke?mP1uRi)A?VPL)b7UiBSa-aWbiX*V^N)dC5JRPq&2$|I86MdUuu^o>y(~$bZr!^Zs(IAx-kCvK z%X>Q1o^p{Sso>`fB&mi^w8`sUo~+4(w&(TsRhOR^QY3gxPuf~hD__ zG0%cgr@2t0`$HtT{t!4_HIUd(R%RAT_OMeAz5Lg1cP-AzL2Nb5?z7m!`@2`D8YC9K zIK+me8P#?rV_dAOUE>)SDH&5n2`2jkw}^IspEl`v{~b5?xPQBN;cKH>Ox`9_?6b&B zx)RyQ!7oV@7bRRYwa)VdRtb?_@aCoF5VyncrK($cT4m(C^pJmjXOyQfL!CIV&nZLA znBM22;YPRg|2sYJCQH|$uSOQ?#>L!Hx86U4Gt7;f`%sxT71xqwZdCL6?3eJi*KX!f zQW%`2;i#-R)^Udo#}O06G@eP-e~r#p4z!gZeG zERF9NiSi%1!u5Oj;?fjmOo!x!34^0g^e!j`wx{ zfBV*Z8fV<0Y`?euB_e3U;Zg}_*um9PEgAM^E~C$%d9S{)0moo$KqN73417{)^rie_ zu3r?joByhU-Y+XTx3I$_AKqKOu1mVWHLw;bIA_RP{Y+`;!w79XFTM61`dp%X>#{IG zK8A+zS=Vbp^0sfenID`dwN|T4c!7lmtSx_8a_dC-{orr(S7MenhuoJ<>JsCIdVLXYoAKMtR>Ero~|abRQ*7 zzDYXm{(bD-K20IGQd>U9~Zm#AT2}C>Lfk?J3)7booflYyeDO{V{ZJQ|bl99J8oBD5+O5`n) zIUWQloV-Xkw#4|7Zr#t~m5bh`PbH`%!94HVWi*=>Y=O35^CX<9Uhx)v%Dz3_ahYZR z*Ydabn?90G#O>oY8SX9pc;ZvoP*c&P)v98ear6nYQz%$pEHk>S^{m_U{=otSdkR93 zA%LtEo}lqGi{_@&m*sSg;U-sF-^8-?a>@2iG@6>UqVhR!k)|A(?`u|GzqNdtn*Vnn z5m6&=^DjSCU%5g~yAngaxR*7lU=rfWQnIR|>!R5nfC@2HTW~!xlnV?}6cMrbAqsYM zyT-7(%i;1~RqrpZ753Zp_Ijih=JHi%!TSkEn~yw?@o=Z9o9UlY1&eQTKr`;GehWpX zIZ>$}ut3CZ6f_?cJk`-g%C%DrpZLF_x!IecCD5OJFAeP0CjfYf-H+1mnPZ5xp16yF zZwk!jnQ!G)hUGsJ^O>!o@)%tnf86XQAzM^m=*zn`%~osqZd)VJKhKQtEtai1l`7u# z@K++Q)0Y3W{z)1B zv?+V{E>-$fcjd8*bIrb!ZhhauDMyUo=&%D51H%MNA9|Tu-kyFYo>8?kUUWxQ|AARY zFn@x^5B=(r)hIw;a=h+P4{3#>Cm5)0)Dy+%Cn%++0dwgz#8&s|u*Tp1uf`yWj60v+ zV{N#IQqz>eLiW!dEL5(JBba?lpVq1M@?*-ihN{T%isi?MZ?30_ytR2UTlwF(vc+6t ziW@5{dg%Awl=k)6eU09+NqKC@+}5cm*A$Y3svv2`+`o8FS@y!j*Nb{Z<)VuAa`nzNZ@ z03Xmu6r0ubA*4B(PVPsC`Ouf98=4pyzQP!jFPYz8eLRl#EeF)ZpI!c{^35q`z=s7? 
zltHa+MyH*Z?Yfu@u8epwZD*&4g?sa(!;c@W_N^y-jkJ7HLBKM7EA7#86WV0?RS?TBU}DI;dyC0Y zj*|rv+h+Zb&uYYb(jW?dPe zMk>k;2L+^qL%+%N^2q<>8&*>ga6CC#HeVDgFEVr$^%!RGh3Db_o_h99eI6zBs{Cm}zH_S>FlA z*Lst2xl@x;(FHN$3loe~cU-f##8v~ughut>SM;>>P9asC1iTZ^Yf zV!kMI;Ly358|Qsbi`lHL279|%4t`BUjNjD&QJ-Y}NV}?r&+PrXp;AtJL&ItAP6c0=g)JR{FEj<*k&=N2WLW1GT&&v>58I%4)p0GFFZ)}vYx#J838v{s`X8zyU?8iUQ9{*=p}{h zF&DU2Tt@_OH0EZf7$)1pnGPHIYDj}G5sO%pnzHO=-)z48lxMk$NLL6XULt(gI-BmH zgc6@PDg^V`qLpM~k}i?AY}AVo3s1n4o;rnT_?7;DOUdpfLa&>IL#4aCM=f9RCTLKP zgy}!rZA+~eWR!50$pzhc5vNcYGu=ZGaHrmOAW#s8GD6kB>urEzApvih{LF7s58@Kq7vP7P=p!L6l2b_hsDA4-0qWB)m z*0iCxeCu^7-^U_%OIIFWWLSR1tB!CWA4w`G$R@qzEgk&Zi<-IQ){{k7?2VK`szWbj zUsMsE5<0Jf)fBoGOY7cMRKApP?~e$>%F~dy&&F0qP?Dep$Vd9?D$7pOCCa7@OZh)h z-o+^Xd;UApL=a50AKK;biuQXzKHF3pskC%ZbvA6prLcAp1V9m!#pL++>7E~b_#1@X z)AkQBf+fN``b>`GyQFPs^z9c0e$*NF*d)(xE84p{ko|0v9jDkBFo?Dc{Rwj1DGPzM z0(;&sy2C(V&@@mbF$a|YZ|Lp#r8e@a^JUz1AHE+LtON+SPUjQ}5{Z2NUX_osuVPxhx6f;un?Kb5L)w)<0CwPY^C`h5eClVq9FF1r z`XU5VC2{#mDJhrWte_OiDY5BRfJn$U)(^E=k^>f3V*qpTF*BOshcO+#=Bq({xB!_- zS1isRVo&{GL*ry&X%n_`YC@T9PxVuQgTlmh)kYgg@!PmE!kviG-t_0Kc*5@PsaHxD}&djk0~~XNttl zNMR36_^f@z)9QK<@{?DqwXPQRSw0EVc&ZamJP36?%D+b}lMYb3<*=XFhB#jb+FYiYk| ziS0{y_mVm;n;PU8aT}(WZaT9EZXuf=!1+b)6 zEB{xlyjF-X4Nt=KArXobyPmi+5#vD|#6#cz;-L@3!>3IFNoS*k`~Vs6pLjTFq={|d zF3VQhH?|7Pp^icpN`-9WC8n4Fzr8PnYv4vNSj&p=Pv>V}@bIE3!D!n96y@HhvE9_ z?`)7HnExe-3rLb1r!(eP6cfkZ7+so0k3o}YeTP@rDy(Na1TKS>jQ65|+PL$JK+_?I z*8=;97hl}FbnKy;($q)RFNBDRt1S0oR3op)DB5jxp(p0(^c z!B`xB0;pP#Q|%Wo!d}K$qI!tH@$g^Z=sL9pV4eLRSj(7K&=^DnrMyT2ZfT)EOM5d_ z-6P6NcYS7! 
zvNsdP5-=q^^UYY}(${wm;mXJ2urs&wMV`u?1_bJqgvpP7*t|X+^(ocPF^pf<4Sd0B zY#}M*d*y-yYc}q`uo5fEeptoITU!)^O2%H&H~8oYeIo$Nc>8ZRzVh%ds*j$R*NDxd ze;@oNl-fR4O@i<$b~5Ax|7Gz7kj_8bj(t_1>AU~j@$&u)f~y+NdprafR!rZKT#GC& z7Jb%(`@NVT({(viEnN{oyF?n0{<_MW>ce9yWYKs=?r^`j;;Kq#9o9Pq>{p&FKQuc7 z6C&{<-#ZBti*6v9m}N`8&E$L0qv*!p08f-jBJvx=`!_?fjYgYyI$QCyzW2t!u7$IS zb0vy%jlJ8>Ep5C9(qY(N+D`*OO#$Oi>4K2T;D3+>wY@kM$To~{GB$WcmMo1OQh~#X zMdD|n^B63((6w56YEFZxevsueCx48QSq@=O?|`O}H@Qw5sS}^?jTB~BDN0=W_OEuF zm>>5eXbZ553G5s7lKqKihez1cnSGy%%atl0Q~KD<;jz?wvQo~n4Z3za@Yr+_Mv$Sg zLn=X9ap_|KeKgtozDX+1V27K6*nJrJRqUGsdDSXW#E9(Lzg6gSRM7&*dUt&IK#II_=tWgdDn<9 zzVch-|FJ?OKlPqt-igz*>iv&~axeIJoj9ZCSMi8s@Rnf_8zUwOr?Irr=Y8qo6511i zUe^aww9-bsE)k*ii%*by`+)<$y@+H?*m``L0x$NxtQl1Nv3SSJp zkzco`LU;;Km_*Qzy+kXJR#dCljuRw@P zJ*6w9`twkiM&y68+0rwec!^2ZZ!f{!ABB^hKW-Ctz2BTst`gqndt~YNQoQt^S(7H6 z?HJmbF}=NfyUf(ij8y-8n*5)fO)gxuIujUeeIuU!VaB-rR;(#~#22eLmOy_Ss}oyV z&DHf)aJuYEOOU5Z@YAoC^a7&LsU+NW&+#e!U5avs`~E;w-q@jHG6-%f7@GopBX2T7 z0^|WcLex`5`IwRzk6Q$^uPywv_tJm$uUk=W(y>{{M=zeYtT3KLz-3J}!g=ebN;|xd zP1CuurO?KZKJgW)ym&6dE(_1yKg}VrZ&blEoKNeVA@@f}xeIk9%A+_K_naKtj=*7KQOyDrN?g z(2tjffrPU}5@~XOqjqe97ULY<0SyD4{d-oyp#%P&Zuvtr$J= ze@2vNpmMadPsJMfoM+q0+4>JmOecLYrXQwr(Lyi(wfw=je zN3H*S@x0{mg4G}~Ubzzm7-Zi;=n_V8MXC+36QHOivl={hC#e%vD)1X|Ty13o-7Yu= zJU=l^ie{cHieqDmT#9tye{taOcRyMxQM|D#x?9CVhUBH*Gni}Ex@tZ5OrZcdK}40< z7~Q4Xb`kez;8#_$urKJIR?Eh~Xqgh&`$ z=IEQp|EN^^c43cud`!&{QF);u>56+kMj(giY|AeM*WM4=e-gAoMy>x>OB; zLi-BerdAO6N5*fx_Po^0#bhJ$S$itO!DcNTzd|@X{dIviwLefxh8vbi_r(Bd^OC5< z0T5V5PxOOhQBSOSlms|_mM>{J&}#CBIAkuG7w8h^oYVHF$#-3|I&S$XPw7CJ5+Q7-@SA#D{b%5&rnSU z$5atD$ObEV?v zNu&x_-qi_hN9Y*#Hixpp-_?v_o#2i?(@)PxwX2@_lBP0bdAT2!|>xs)K zGgm(JkWqIf>gc>$xvJIsw7I%nb&qm}pFGw6cPCyp`=4i+tZ7H?jg{KbO4Wo^p0a9c zF04=x$YAd%XBm&7L~x3T0jS_W#`r$MfF3kFQUe#058k7 zDQW)OMzq?nzLYwMngKCS2)935P{m z`&Nr52(WjEknsbSz#yY=zOv>SkK9***x}*BFE}Vvo*~nfz7{)+U4*=MhL_)ttOMeQtZe3Mn$$;SfMHjO0C|XL zcxb(Hr<$ne=Szu`D)9aobTx{zRF}0H#e>UrgFw2;E2@>J{@B9*gvx?FRBvKrSQ$#o 
zy3=SH&{LfK@nT}l({wS$)z?cGa_int$v8htGA>(w+zaokBrf0l^1k@*Z@4G#4134I z?4s>&?aMwJ6&x^sz;qRMb_G&MR#6pUW+OmYL;E7(4CzW{^+VU>9LTh*A`H|8Nak)U z{RkqX-g{R)&LfBI7S#3AB1pk%@T;L(xWU@~0h}gyh-Oddygy;+?XrIcX$5KAofr8M zo?=9H$q5n`;}?Fz013In>D$ORnVtj2Kl$Eb^d?WJ)Yo&nDcDL>`j=nj>w)T6^Com4 z^?jKtJ?P{9M&*=DFydF1sSw97xd6R*dNYGmzFNBD8SSCGL5VEWp|i5=*rnkh5R>x> zTt&m8J2C_^O@mn&)K(4lE_|I%wx2H9$D7z_r%ZNJSh~j1i?yrfY|XgS)DGscvW_k2-!G^|qt!?p z7QiCe50ytYJQ8T|ubmUwwpca2C?l`b?6>Y-AlRoYn`w;)4nI$KCX4k~QI^HhB~JD% zWAW8Ox;KtgJH=q{Xr&S+7QpiI-%sEu5+`5e8HvH&%~>9y67mywMc!59Uzax<+h2U7 znMYipD{l41;W2-YHGlnv!F|&13by`nj%Kf7CI9^WZC0Sn;_qjW6{>ZfzD+pQR+fcG zF-}af=^1<6zVGAfrra_~w{owUOdF>3LmXw-H>ejdj|`q!dx>l5pd0S|j0+(#Hobq3 zq&WjryI;PgW)h_P65%&n-?8}-KF6gvItTBAY@Zx`3(A7FJ!CtOU_rPxme2(6L&7{h@HKtB%<~3Lg&b_kzkJvN{``vZOJy}fSTlKv=h%v9 z$-3)}&irLY8-;A}cmEvtap^GED-fJpf=*zt3Nb*E5c(OQLNt32r3P2qjh3Pkluv4H zE9$3dKb{+`(&^=+>_iUHTc(pAVLFtbv{TEj`1elxWq@ErXj_vkViXr_6hQWPNJTXn zDYwu_WxXma(^wH!OUu0_OYd9FidWpH0@Ue1X>u8P>KP=0tR2O^hbQn)>+%o26Hx0U z4z;shGCVqI<-2KbPC&wSm{mHx2$o#CN^+D^+sgD-!gOLmt{1=%o5J`m znA(m|h2-ETk&dJ}Yuu$FYcG&C9*-HMlAOuv^mDL7bDffz1pVzyWdu);eKC49k`WmLM!)na5PoAs{Yav*EH9mAt@M z0q;-gF^~UFn<19QEp*nmf8|Okw=J>=_5@CTSji_AM|KGH@K=`o3|_-aD>o0pHexr% z5-5&`DJ&N>A>Z`p@%_&KP?J)dym3DMLLszAWfQ^LGiUw||;2Q&a6rTrV15r3#xFO!7`ItR3+y%Osq zGHezpcfF*unVR6mJFw8%Df-JHNx^zV%yz6TUwuqS7X>bN))0J%%f8<(v(^-m*P-osGL7pRq3j;c@6yu>j(9ctU6eh09PJeA#Ywt1_P5-kKG!K7N2 z;;@*n>5KNmLA@#Rcrhho_S-0YU7Q3;RZbk54F`mR7v@gVU#0xR`T2wOegk??2 z2n-ydx~z4x>*s->H~W>+wvde$Eeos$a}uVTtgZSv|AYkgD`cBI=U$OFGbDiYjLgb` zUY(sV9(q~dCtfIYGfmNzaGRL>U800@fHBGQFC!#LmNFC(=73qG|J~qcl?d@xNockk z6*3V$0XfX&llk~}^vwDF21q^zpx@jfwl1+bfGKMdnyTs!N`CfyF?(!6OIG+wP)y*{87pSO?;%Yt^IBmAGe;(!9Cohjiw?aR{<`A@y_ekYq(nG#Vj0&$S z8fQVm;T%=|YYaW{l`b7y+dC^|QaOd|!~K9eeholSRmSi&oysk$aOxd#d0Iwx)OZan zyl(<=im^i}!VES&Acv;PyXp5ix5Hv5&L1X&bu(l9z!(*X_I||x7XHSw(cza@`AJmK4uW+3w02UF*lKfUdd46@AXoCOC&0gdkKd%b-TQj7Lw@{rzQwW%`X zb$yAGy?hu6BAEsI4OG$>T~Fzmuq3y8*(`rd>%#PA?O8`ANv!^Or2jf8JbkNbc zE~|we1Q?8wB} 
z+aao4qMJhRs27)k+7WuKdsn5C1r$5@QE^77zL2hZQmD0x^2{ZK8f2RV)RN^Z0`SFk zqEN=UEY1Ia*PjNHa#B%z?*KyP!q)1~S!yBT1Kg%eJ&=Cq;7S#6{)1#*yJCh!L!Eqi z?CDVc1Of=67Hzb8K3{MOqzZM=lZ+pabAVJL_FpvhDsQeaQIxfwnUA*jY>EU4ZgG(%M6K+M|Yrl zL&wdP^-72cxdh@m2zv+e9&-c#aSx{y%CnW8=eC=NO z33e>n!s%P-qV>P4S$lq^kH>-sSm|Z6u=06u4O?v1{!~AS(RGY~FVbSG-9Da)QPGMn z+-Dq92>DLdb`5;S{xbPy?eocL{m>-n{4sh$8j9c_ia?_sR6E^6Lr4S2KSc1xMk=Ai zl*av}ZcBm&9?_l-zvv-YnJnJae*q}_HF!-_WZLVJzB?$qwCIimK^72@5uW)Q|3(kl zVI0ugi~$3IIlfWpR6>oQ`pSlMgQc1(K=7<6Gsd6=(iMl5f!q#a3EppW@`!L^@9M70 zcblBV@R@T9t3@0t4xHHV_ybLjJF^9@cJ?mGAWB&kr z2Oc+0f@xmGXozyB+bl`N5fkLDa_1(BHdK=6{a9%3YNCOdk=8=X(HOV8w0#;DRc)t7s5T=P0HWu}nTfEMvWy7%j8g;wwR8nbTkcga z2YoXR8d7z*FmwqIVp^f{l628E_?Mv&OIU zkv+J(p+4sKITv5OoUZ({i9U2wmhI=A0V4Ee$Q+d$WiXX&$IZk^n^@#Ek2t=jF-GY8 z54aA^YNlMMi1{1mtaOZV8j$74`7!^-)T?gudU6LqY=Bq@q|(X3*T-{wu;RBtJo8EL zL0>bw2Lnl^L$5x#gKA(vs{Imub|*YP3TT+%JPd$Mwu?;Ocx)dqn~9hCkIleubYmw! zR~V1crr?X^ay=Ba70+tEpJYf%!OfBim^`T*4RtS7Y;c#h*#aXsrs5uP^brsLexp;b z5W~>I*dPk+k3)lN6IG_gBG9)W${8F({g|8iz^zNQ*t;^71@E}Rz~!=*Jj|3B?sN3zTB09dc(rIlz`JMa@VVB2ez|G zgjVKl2Brt;*Z;N?No5u%euT*?2_L0ag5=0>IUsV0M)3?QPGf}3o<5CJ&Wcr`!_${k z+_RF; zjJ3pC*ieH9uoECp&3NtHM-t2piNEurt8&9n!UL*PuR1QH02=^FGUuR8lI;{#?rjT~ zS+iQP=qA&GIpUq6ww{14D_N7D2Vz+TbecuIq11#OW#{O9;^YxThj`1loR;Lt@&S6TuN?fdRV4e4r5AxR;*e(BICgM?x)v5}GlB~XpqeTFjA zo+fsMd=}g&acyX8B9m&rm~2cP7kiFd_;tz^Ics0x%35)mF;^_)?>UJw`-0mHS`>df zs&8PHev;8v18^dN5BwRKT<@*^p4$%2-$B`eu zeCQJZSC$1W&ZHLnl=(0ZC=W9o{#I`1ldnN%DKo>ATJO&25xW1Lm>BJ0DKL3T5>L#4 z6buus#J(lpbGocyWhf$rFk>?Y&-`(vmTsl7rGoIw|A=w*M(C{-tzdaMzs*JZ;M=g@ z#@33u8CwdI_pf^|bC)Ucthms!j9#?plCizVt=zTiA*zk$Y(%qRb0C^vKqb8+xXdi0|gengq zkAGOr+<+?93TT#$HC&jazw-ImZQuu(#4GYIvk?oot)#wVscmO{C^y;TS8)dmI16Ie zWT3klg?7Lw4*UMCt;}~ED&@H+YH_aFvb8;*f@T?S0EC?Ta&!f;v-(qO5-Ij=ua!vZ zxhGuGL|hXyr3-qZC8U$|v60cQyNrl7q2DWscn!>N2SQAWPU<^y5h@1K)ANGS=n?4r z2!uGggbj5?p>II8Gts^w%5ia^DtNZ=4;ZxZemaS(I4>BPA~ljY~YG4_!|mj4K@8=FSr#lUrmqdUjgTd ztod>9KD)7?9?S^V?-Uo(SeUvGD+v82z&irHN2m%Px>n_834aqjM80wn1ryDEY~l@i 
zYtzK8p39+aX^NJPY+KJ%BMh_X1*6cKJo5sO?SAxO2;|PEht8KFBWmoe&NW~8uzG}n zmln?tjW}>TMR!j)fWeN8i{$tQ)XMg}N@p;3`=zX_WfNJNb3{tOEgng&;sDNX_XLad z;z*4$6!W+~e{Qr?N2pO_a!YneKyD~;KnwSnA==cZRBJY|U!+7~?sov`C~oK`RUi&_ z05>DfK@5aU={6y?e_}#~dM9RoaUJ%Te#gF0p{P&!_Fy?g`y+%SNJhpWaS2DL;0)}DSOK=IOEhFKgO@s>mlYgi{d~j>f zZiusLXDzk>tAGk29Vat(Sp4FKz6i-6jd%DSzAq4ay3|b`zs1L+?rdpZ7KaWGwKh@0 zJ|xsbw(D@mwbD%k%k*Z{Kwryn5LpvD)8+ z6D8n-3ERWp_=e(A*IB&z)(C6QsivjO-(4tIl-Gw-!(R_CgMCCW^fh-T8y|1=X4UCzpR{cUg?s*Qot*FiEIT6%3&0?kfVPHum*k} zC8(7>ec88*gzK=H<4+b}z`^Y4I?xTycAC%=VzA5Uz?1NYZc6~>m1Ru2 zq}J&S+d+6bYJqvo;kC+U@taGGn4h&FAl5Sid7%ib8m#(uWWLe1TL9RKcOh0MD?|gn z!q!Yx-S4Fy%6J5Cg#{~B3Lx1IegjPd?-|AkgP0y}V@bas(ZGp6t>nrRGEh((X6{d0 zzA4o3+RDSyy}hO1RAa{E(wZrrC0o+LZtH<5j z=ccIyNmxi})j}*O1SwPEgX!Z>-6=gnv?td{;pM-l?uhC*P~8sNb!MyVVLJhgUbp$>Ni6yN6~2Vr{~^m{A9v1L2WAlhQ%R3Z$Q#b7GE8)@#CI|3Fl>?ZLju; zqriBJtONz<_%5;%BvFf|o_P-rRQr1Dt+F8`7q!I|$dUcqZa%WB&}>Rv`?piWJwA{n z`NF%831B8shqFL1sF(e{!Fm%R_1j9$m!~qwbSm?1Sq~4JIJj72X9L1ZH>F|;pQQ<= z9_yNLi6nv2@@=$gEHykF=L)4w5#JRGVDkk7O%KO;zUy<7Vh?~9-A6Yr5#CW~;XuP+8+t5E`&`;(3%sP31+pf?_{An;&Ev|fw1M7&~0 z&L&{a#MH!9I?Y3K2s+9q^`5XDWe_HE6Or15sp0j(NEPv1f& z@kiQdMHYsVhP>z5I>lMQ6E`kF+CP&V417CzVTcTdXS)*nMxd|A3PJaKa67nbMJ`+q zG(m{~;=yJ2nPeg`WJ*=8bO!Gw!9@yoCs zfQDjc;KyOs32?(qSxl18r6q&biyX9)aqkfl zs1LXg2)uveh9N*`G;6+rjpvz1qbE8!vP7WSq!!PW{JrMl`fv&vk1f`m0aYBQ&@0HI zD-{j@w;E57-s_;VT)zW6HuAjCw`qBQfQu0-f9{w(a@SyHPIN~M(5j|?x~0wZ>3sz} zT$MREAdF&B`J#Wv`_Zf1rPib^B{Zc&5-26{6aysWpUwzTtvUlP;0RSEDypZ?hj- z!(R3V{6b-G2@Z{(2SSr!Kw)|#oz&<>vyN;CV6gbNM`g|^Zj+B4hN}HcXE3PuZM7F| zLlAAse)12WK394hPAw5^(XTi$)Yz7djT-1S z>~hFiG8D06Sk^qrQuzsvQ3wRaGe3n@!Fsd&Jip|ys1B$e@_hmSpv*(EeX`R_ka3kx zV_>p`3?X5X&^JK7<$!2!+g~GIBnKlpzUa|LcFF>)E#y3&KO`G320R~#0L`W&y3d@H zpWHrqcp`wkqj0f*HDl@4eLdzUix;-2YzEngNAM#FD+*fz*qvj(}4uq6u@-I%A?TaJhsKsGRDN- zWh%7firN7L?C?@!aI0Mkn;3HkkZDt&*g)VoO^uW>a{zDus10S2zR zFKpnxuPUetu1@H%*KQWYmohbCwdn;?xf*i^?0nUe|G@xOg37UyBsu=&dwXATz~A%KFJ7M37mRi%Z+q0R=9f!# 
znSuNK@#THIp;~+uYUIeaPkN0+B_1=s9+ZfG1#VWl&jiJMOC3jKT8&9WIunhV^7WOeW$6s7z^u9M zpssCvg4`60o)5_o3xN?Yiv}O?A;WR9x1jRRa#ejTgrEM;#_uIfI0b5nqSVR3mqNH6h#vfxORSNu{wW*&*x$*iW zbA^QF9Twpai(t;X-IUn$2p;UY+p{}+2v9(*ZVx}W+$Ys-sG-%^Ug^0G=LuFAf+F53q4# zgJv%D$7E7hp&zI?vN1|~>Mc5Us_X`jSbcW9-X9CQb5k!CKFO#kUc1&d6y91!BO%Z1jO8&ga#(L7!G$3<1 zxrpxgs=DzfOLT^#a!5hhvVwd=USGHht&0NM?X9N|;=2|<<77MN7T^x5+5SZvwH#7C zbb7{|)@2lN_0Pw)GSOR$oa*ZvC)i5X8U}N61q!z)w1{%2(nHawM8NlsLMxCvd`rl7 zGbYI)zS-fg$8`Zqn=i&MVmi7I8yv}+_>)9Y`o8W>>H6s98v^etP7MT zfYm2#oxN`~3A8ZH^th6o5d6mvyUl{?37B5gIH`}B5_k^WEG?)!T{g~yRIDLndjp+j zoA^tm(;Mq zWha(7Uq$6Zv8|_o&MS9PqntzYP)jB%_d?)SfR6)}IFgk$S43T}_iwEdMt%?bb3tX> zD;M7*6OwJ=fiu`-P6g>>r|6J#Vq%DWjjfJI`T&@;Bv}60PxS8*qob_k{Ie8{B<$Be zf37(L%YLkr9FlxXMbnYTg*=eEK^pqu5+we159l9zAn%u!lir_^t%Z#rIW7c(^p@;s zXpQ7P--5qr=?=_5EXoOj`?p2O{-8AL`LL%MD}d=`#GGdAW^&_9N1;H5#J8U%S+v*H6ZyxE_pRf= z6s<;myBRlW2uL5}$=sRY0sD1zy95k5WLudKQg#iv9owCYMj_j71VQI_6mr-{dDgVh z`EvwELMHCv&a}R=3+)=+8jWl_^rk73Z{cR^(;)5fD~+f}GwaJZuM2JXR;o}rT&_5d z>eL3zbOmAYvFN1GabOwW1*p{lop;A>W4AHCMMkFE&=V1_JfAm{4Ah}Fx-GHoAlZMm z;UtLz@BF6S64xWo%Zo>5N}mbywfdo1I=kHjEcX0}LyJJ)GmaQVjM&-m(uS25bb zn!3lX$X}azEc4h5WRtgC^cC9TwdO~>c84#{;ck~_Q@hwjluO!>6_sSQe_jgDNWY&qdjSH2c}Csf zt!+FYyGt_i@OA9&nQGgID-cO?hq&@r{hx7DedLg#?$6R6Q(@2IwxW!Ns19A=!r}k{ z&VC4k?!%1uf)ievQ09_|IeFx>lWnZGy{jjZ{)7Bq0QS2lLj>U1HvY~@`z z%9*s8;(q|?mnc3=jYG5XNKUpm?ymA-`9E-oF_cp>h;=kfjZ;`|vJk)}x|v(BNS=AA z0~)iAON2hGHZbS+9Xys)>mlMlYz^P`17SU!cg z=WR$TCw7^_%hBj@vXCx;31Gi=T{c4iy99Nn3w^B#H;=aC+~eK03N*TnbSXaggtOre zEauTsj7n#CXd&4E*NHz%W#im4tY*{v{oYUM8)h%I^Q{k-bDR@Z^ba zPR;w-R19%CH?|`2e-HboLL9iL@`KBqPA=JOpwGUX37EfDO23DywEy$*F^C4I<7sC5 znKr{?7aOa&g4Kfox5vKf2p){|6-b5c6O(v_eBf%5a1lakDC0 z?2_->0TSR4T4AQdlusp#*j;T??zC0yr=Au6vnosQR_&)3w);L!7#=!nxny$ZPenP~ zNs|Z+C9fK>h9i4>=UK3aCismTscgclop45n?=7#ztWrq1VIc2Uy;{LtyD~5ujQyGk z-afVZ>pHl$J_-Y!&}M*(bjvM|Ctz!;DhNj~_7-<_5ji;E;R}TR>}L$-Fw-YsrtgCJ zy@a0Nl%kRq1f#^sP$d2%dV=~eOC0}}>-HbMpV{eHD@-Hq(gK*SQM$NC5$6|)V;;c4 
zJ0!Qx3ZaI>N5I<}u#%uMp51vdIaoSW(G9M_e9y(Ti~`lPY=r)AHI(FW#7XSsT~jZv z0q+LV#iH!p4?e0>wxwkg^`eFO{%4E5EX1N=s-FlXXfEnCcN)+P&|9fxF$B(C1X9U| z6wDIl@%=SBGUBbY122i&j#OpeAlMkN#Y|7&WC7$>cX`yAHuVU1i9jD5Z?{NJyu*)R z2JR`GRby3VGbf+=y7O$bx11|WnpoZ{TuqgyGM80MpRqmq*w{oCT_hIMs@Oa7RB|^NS8@_LUMkfE5 zMm=4JveFtZ@esT`z`qfgpz@9WUN30U1{kB#zS5f8kh|uZBURhuUidE^9<#K^rIl@8 z*3{6eeX^Ju>JF~B0h`g7&aKdTl8&*2{JOgHzRqw4jP3oFbH2|(ig7SKFpFvAA8c*8 z1BIu77rp%!-U@&{7=HfyDcoNu4@AZ0Ga^8IB31(@@Oh|51GWtlTn$BQu8a@ee--v< z@c1in^t{}A@l#R_1#fTZ%f+&1XKi66ja&~0~o3r z=}iw7k_c$%9i<5vK#KHstcZgAXojjnM0ziw#sU}!pdcU}A%xyS@81she($>XT`&LW zav8Gs?3sC3JyYuzf6NWpJsC+lp%7=hP7 z+WUidX4_m?2f@U+Gu~Scpbb?ED!D4d0Ct0YVDD-s{j+)RI+HtlRxzyIf)OgS3MnNklX`9zFuiAb9%s*c?h?Vzf z1oiN8K=C>ilyk=ANUT85BTA1~fY~Dzo5$U`zU!)MY{Zak@4=MVQ)Lvt+>H|^4rkQ+>AjPOs`w{bra_oBUk{B zYo`UfiqE;l$$^|&(xHHWWeqD^F2}#SborQE%b&nsCo=z5L&_>#H~anXqhM16)6e1^ zN&GFq_1e*no_k!)4!AvG{f2!CxX*>qN|tzbbKnfIJ^Rv-ZvrLpm&4yT=#ZX*btL$G zE_V>+L;@$o>1pPw)Q3ngcLh+SgC`#Nedzg$7PhMDkdKEwZ6dhtjIlj0V<{dXnn2d0 zBS^e2bXv^v+JnQ@tsqi6x?Xb{jw3C0Tpu2Pv$^jZ*ysCPm>#ht02#h^Kjap|Xu|Ep z1HTjtu9=V|M-&ddmI~-fj5>Dtk zP*D#F%E8#rZ5jUBiC{)kaCtoPQ^vz@+lcHO1ZqGUX~a|o69>_GZ!4&LC!6lBh2J`- z{kseYY;W<(&3Juj9o9iD4OMEo>$@g(k|$kN3HcEy9O`fje>e z*2V*WJ^Oo7I7H#k)Zl8wiv%E`iZ$3c$eYN2|AX&w1K5_XD%$#KX8Iv36^ny2Og^KPa2z#Kf4Bv|p{rLC`Qj5xmD%5< zkVBwi6=Y6Ie}g-B<=rdt;Ik6eg)MYDUpDO~8|Lf>N@6U|+ zynGBrtTa}asqVsK;0WgJY4MaF`+a+zaYx;rfQ1Z$9t~+mb+&;m4dxai3ZE3xS#$uD zYjjTQ->U#@(j9z%DwD9Rf7MsH^-T)(qvd!#U^shA^H_T_Z@h4^PZ19jkc1>s_CN}! 
zIgo@?m{`G0`;_9)y}_Yd;-#GlnXo~oBwJ3Dsq9T=;mewRy*v|gn%!F-_JjA?P@)2| z7KhC34-GVP9a{vnE$heb9ES8B%$2aFZbIeuAG^iT(B;}jQ6rY%LNI0T$bP^)`T=xH z%AMzu@voBfXEfmVAKN$Vf_E9#*H1gByzF%afNW}&reLSWuGn`5(y!!ppk1_CUz1@s z#Opd`%jq#!F*%rMLS2(^>-?W*9h`Il^|g)wa`~R&J3Y8R0!06!FpLJ8F5=A&R?8#f z--53ELyM9O*?`N?SlUfwkv%J_y%JnA1s;Bx8doz=ueK0t!pXbrlM5P_2Ik@h-Md&= zw|f!*I7H#YL*hr-iID!sdHocBI%HR51?OPqf4_2utZdV5HYU2lF7o`rThFk(`;;};4On3xHb z6aw$p{s1}S0Q7pR{6YPccl$paT{Kn!)$Gsh2@KjeWxp2zbunLmNCf)J@3efQsfPpx zjNBG=99?D+9{P?mx4n@m-F-_kB%Ud__!)fQt1k(FS^Ehb&Wh*Yx+Z8d1cxRhZo$4i z0B9601UNiw{i<(vr~-3v;WKb)yh_L-pqC&6wLu>dRHYT8 zd??2>x$2~wUr%3c4PULYq{$>BbgAr;uCe2s%G~J#5-a>p5kb}+nR%sa8I@kBd<L%H=Ks^0h&Ma0}W~iBM)K%gL-V3>IGr^XZv5iYzVf zW=_PvpU!2D0RD#;@VqYjZjfzeLm43bj%;5V#yV?FG1tMk7#(>y4^{WmZV4bU%D>+y z?D)4pv5$M2adIEao`ioN7zQn%Tzs{I0*Y~B+1#8@@Yf_Ww5c$KG$e zTDGU2Ioyam10W+ayq}l#_3;R_^N9>IJxD)}lGOIS-aHlW=oetOdcQ+Z!}ngkGUsOoou$3%Qgcz&B-Bl@iK-c-oBjJ$DG|XxO%Z8Vf(Io$9GVa*RKs0lgX(t zU+@t`l$VyQNP8?nHk_ytQ&W|6_F)kuMzGY-l)3t_BKZs?;l#CYBQ<@%L}WyXG%EG} z=UM^0v=>xBN;AeKW`TH))C?GRdg~*}g%QSB8=#m^K!+N1nU^uZ5zLq{DgFoUD8)w8 zt<1g8tIK=_*i?K+bloj?2Fo!pY<-t%7r^`Uhw$%s%UaG<>_fp*~su0vw#dv zf!L#fWx*d<)|GN**(QqFEij~_QQ)!p3(ZYYlnujK&ppld+YY%6f;&Kc@|>~Tx;=f; zP77(L$%H@#a%8@wt(}mP9hJIyM}-W$&*B^Wb^9P5pR;ah*Wq6H=~G0lm7&pvrAG(X zGsuJpGf6ox2gTJ?6MJ1C2EXO-QP}X&+ZS({XPgc(-2{UYuC^-*a1XF0IULHZf54+P z1qJ?b2d(Gl!4m)S=AI;WF+`W7%Pa{lCVG$VLmJMZ)q03pmU6Jx+9)dPR5}mB_jEvUUc^KU@8b#we2`Z zBwyA62XJ}tlE)qU+S5LK1??h`#0<0b7DYf{d}!@>2S%{Qa8tfO@&e!ig7&8i^z3)P zoDjs79XenhkLP;}Yp$Q`@v6z$MjEB77*J$J()F zARnITA1n7MT$jBcs4F2c^kS=W4I5h#35c4nU#5c#seP(Av;Q=QK2l_d$<9&-kP&BnZ z8qrBofw{Iu!*luefu^iW!+4&3&}#J&-wMe{-m##9<@w49(tJk$(t8uoeW&Qw!RD82sWK!M@?FnDm#Jy1tO7`ShkP3L2 zoWAOPY=J_l{(VBwRsJ^eF+iR4u!kjwN_bUn><Y8{Bu7`x}vr97H9ZljZvDgLFTSq?GOj z)?Uv3Y#qs95(9Q%_EFyN_1P0{G)Ll76)dX#Ni)R&zj1X~lqAg?_3KZ>fk&;BlRWDl zUZQTqp~IQSr_-lHXG1y|%Mh&p&Z%6U_@VgvHO}9C$C+hXZ?UtY6Am&8h_lp)}L z`R!)xWWp&JJ?S5Nw9dP3D*6n3rRef8J2AyYl*m(C4Y`M^sH&cGsX4>1oy 
z`0T#nHo8KcAl9x+D?BtgWF8oI;)Xy$-!-oD`k9soReO)@=WY%NZuK+j-T!6SBRivG zq~Z_{#u5csswI%|2|%)FArC1VTP=4G-IjpqW58nzrkR+n9~5HBm1)QH zzpf#lj7NlMI(&@$1yuYsKL!l;N>dyH{s#CvOTae`jr&nONPPfZF=MoEM@kV{p#qUQ`G_I-?mL@piB)4zG`mV>Qzo~MpIqgjAwLh;>Qwtq>|9#2$84zX5q zFWfUh<#rtyjK&Ll+y=u`EJnjgc>6c-fqyUaQh}4T}4U1Jz6w9U`D9Mg*9=uRi~7kIY@F zh$*Vm?3~2`RTfz&`toiBQ%W-4DDi{);E`L%$00<}t@pW6g(Czh^}f^xbL|!=Qdo=K zP_t&=p3%onOR-vl=u`nT%2{~jL;=1b*aZj^7}o{ZLL(ljsK(#&UNAJgd2ki6%WA+k&;Y879j_Zol(BQ=9LNxfT2pd@hF$I!wq%*7!dXC=e3mE!RT{tzOOCg zm)x1F`*FG;xh`L;8*EtIu-Q6DlpKEH*eU=Xyy$*iGuH2KbX%nrXsHf_=RkVMFSrle zGc?iR(xvqAEcrbTOKLefYkBiLaw+8JN|f%8xL*?8ENv|mSBH;}KTDh!DPyv6F|_0nXtIEA*wSW6T;MA8Y$n`+xvhyJ$v|==Vdo_7>OO!@{`+$>r;J&` z#d1Y_&q!Ac?`CvFhi~?JkhEdN^20?#NHGOsZTIvOQBVTA^jb$)qBv2)shCY0+)1tIg1{{k|1u){a219 zZAJEzY+6r%YC#DjalcT52dcR$&{gc)B|kAw51$N614?3`-}Np<%-m5Mh0SQ9j4?7U zlO$i>sLMKMNL!aY>L>zpZ#0S(Ixz#q>Xj@$r`=#&$e5DFIF|V)ip34r7^CaguW=W| zi(cNW)c{XR^nSl1_Mfb~e4b5O4H`Q!sGi9s3h}4L}J92?M{WIM^O3JAA zGXP2-E{0F?U(g2AYB_&t1h;?r(9_R*e&yM_KW73w?A7jZ|>5n*X>$yN>XQMoWA0+~+eSU;z1TRz_*%Ce1{U@S`Hn<(4I z7tDvxglnBi;|R@gphar`JZZ_SN;$%;(?qcY$A3fT0I216W{hlH24$bT!`DfCFxiyY z67^jsRnSW&2W-o<`QimkDS)n&)B2-h} z@j`w^%_BRrS9jS4?{q-1a^7Un5ov59S#eH2NQE^C%-|HWj5&wOdP0!`X&KUJNb1Oy z!>F+><$UI?^SX=6cyAY7@zn6p|)aR`(>?h}C~ zP^nRO=A{FmEY|^L$zj9v-T&Ug(s1FE1ByFIwf;1?ewnJMEl&#c$UPqcV$V#cOCCY? 
zwJbio3w+^P66E)!@JmapT)Qgf)bcx1O{G7JC;{RHJZ_2C;RBJ`A>(#52W0Wxh`FQ3 zicb+MOid4NgV9{QKcd8b1Tvmfs&q}`7VW6YLXCVN=x1SM zA}SNXv;!BNgiFuDm_Zu(9b@)#h*3<!!@eZ5P%~)}$5(MoRSvW*q({ZqJsG~O*_cW{oaf<%Ys2wCv}M9P zB%{y_a8uLr(`%3!%<>CKlW^-lmzki$R_$x-Tx_;KFS_d?DV2B`ImA`X%W&TzLoB-B z<_e@^;JD$%`g_B6>#*ynDJO7^TjAT%>H-N{- z#op5BeO<@(AT!a|d9QVtQVwo^2a}HYnYj+^iX->zBS4vZBzp?(5K}obBWuI2(teGEZ)8a^Wq=)y7XGuGKBA@`1&&SC=NC8=6$q?~OVg1XWqlepiP*}eG)~Ku;Ft%gp z{BSlPGqTYfY#+y-$c~f=j0(TM^BDBa1crhahk8Gx{KI;uA?_to*ZAUB?g3Ghg(%XM zLQxCZV^6w!4CQ?BV6`$*1JEwXRixWRx7?w7s5A{8UOQS4X(_Z>TEr_E8Jb~B8%KJ4 z7qGwygK;wbl;y9UsM(qQa$w&4S1;H>_O@&2%;W0d`<$6{LzpmgSsTTf0&XJFs(HZb zSQWp4Ydwp9Njca0k2qr8pr=m_=M=5?@zzX13cEdyvNtCzt^j8D+yak{jaSx|r|%el zE(C;WEPe>6%v+q_x4Ev+haoGIS)T2?wwNTUO-T(A}N z3Y&VG-?62MgM@!4Z&dmmh*n=$4e9i}$lNzQrN?F`$@IXGq1c*PVn^b(Z~M38{9w|J z87#k?qSwn*IUsB!$H#t7rAr{Fo_d7$RbDt^Ejzqd>^=0SEQz&dy{_1P)_qmI1|E9! z_}_o$U(Gt)be-wgwQK0(a^{j{+17tD)=Y^X+)vi{ovhMg#-2Fg_@O;L{`R@_D-ws4 z#4}Qh`}`b&f@cH|y{&b~9Z*jlH)$JGQX01?u0<}jQhNPsr_o`gXI|&YOK60yEFr)* zxvq;N*JaqgHH?Nq$m+P)N$rlP7jSv6z=PjVEeSDo2*SEP2Sg>KbdIy2)UD7P6{kYv zhP_0Fd6UGMLsT+i=(})k4E+oIrYQ6DmsnJ-&7?zW5H~aIRJ~Wdm8h>dJ)gdjKDrkoPk{BJ=6S(G@m>`fX5XWSkSbL%C z=fxcAB}e8fShOP=k$=nG9vT-Xno1PBuRce9j79|4T(!(kbPY9Ak=?w97@IVaa;Y#D zDR8NlBx|A(8YXC^^IR??s>ceb1|r}Y+wyCnFopS0FAEL*BQS3rgxziQ#sGXR?4GJ| zGaJe>ff_`J2|mFNyD&2Sl3UDIi#M2$BgE_09d5}HS9#fD6Bumn9S4`|ne>uKl)G>I zC|4GBtt`%*$gJim9yhkLMU?Pu{MgXIL`v{9sf)Im|GBglxglx@s|dq31t7-8P;;@= zjA(Q5lg8a#4uwh6Vf-`Ea1HxTmh#c+2^>o3EpWw1%`PB_E7bmeHy?xyzpV*bgvztP#)`rED+ymxhIHIbp;Qb=z*x6bNszsP^i0siM=awD7ZaJ?*jvL^r)fmbO{{PyjO`5*-Aio;Y%OgZ*NqIqC0so18U*d|PLQVR8Ev^nCWv4532 z6fa5)MvMv9M{!(-^@n+UXp6_D#8C@TgObOLy~dUEwcPCyV;LC4*7YyBnbVt}Q9=@j zpvx%Bb8ObHEfOiBi@RLqRMlfJ&!8DKbM!_%eC-z9;o)QHoOKsGHL_^kau^fe8p#AR zoL>3%05Qe~1z14b=mXY)x50D64^E9gQ14RlUi%`MrSE#G%lAjU4Q}_oGx^ihy6*YT zqQ;63L$r~Lc51L3!T1O-k8~Vgf^+&InD;G&-NzVuqsg}DuW`|hBzH}21flv} zjQceK6ch=h+o1H*AA^Y0rc>dmZw}^Ebc^{~BgU$+O(#c&ANi#mG^Xcao8rI0a<1~d 
z2r*lTsmxn@!Sv3b5aWzNXv9!=QM;BR1tPm=!{=FdjS+VFim(tlPqoD`cqEFxgUX9P zNb3Fgi5>R)XiC+Vab1Eyuu)dPd_n5;IUby)oLT~R^TdZw;;6xknV;B0O7Fg@hlpI} zk3PRA`-U--lJ(H1W3ao*;dM_o2NqGX1*U;#$sHJkt`xx>9+6MKcnl_(#8V?pHbJE% zQ#Ihs=hnDUtr7&)s+fDi)JNIVeo|y5$eNse4VH6fFM%3zs^WCuxH9ak0eS@Q;1xE^v^i#?xk?ynDuE?1d7!fhXrSl<3Cgrm}g`(x(7)N zDQ1laOzR^ddLobV6wBuKYl)f$&NfWeomJIC_CK!cTJ+W4NPrat5MqG%H?d7=(E*yg z8cOY~VmZ~CsNFjFnj~SJbulruZdHtkK7NIL31x=%#X;DJ#b@EUclW|(N2nTBLuR|`M*IvS`;fO*I|4j~I)GlJL|q)JuI>7w>g77^za>V%JtKJd%H9sn*?t{B-oI^t96Z zOy$!Kw?*eqUoAWeljnIZt}vbvpEId&1+Fe^+$D?3T^<)us8O9-J}YD3U{m894Q$%54x^%+7bl+@ zCk8um6ERkVZHg2p`a^u8M01G?+ZM89D^a31V$1=JPt*eWC%xq>2bnU8fhp*+p5J`%0l|5poS0(S`t4 z4ylaUh`?@msxOnVO-iV_C~7%06Mwk>LnM5o7M>A{4N8k~zXJE1VbG4(gqYJ8Vbi4P zYb0sKv*hS{Sb;klVFW-%gggLkn?u>@%vNK@(2%x!8@6dJ4y%3+ymy=)D67;8`5hHX^C=VK4|pXEU9g0~mZi1Apg%t>+)EqxjaKpiI?98b|+H_f#8wy`Y=%SfM-U4FKs$#uKI%-Q2qF!KeZ%^D)@;w<>7nBun`)0FE5skw5<% zmXuKH=TFL4$l4cHPq`3c_~NiJ#!RsO?b2@mxSBBt?pt(i6RBCtH%CZKdJJo^GT3c& zGELHbT~4(2m<;Go*6!>vUh2GO!VSy00T9FUmD3WFBQvQGEsQwT`{|#S5(-Tp7 zu}Atp1fqh}$Ov%ecYbtaMux$RLp|Q(u}xMnSfLndVzKeqFAnIO1S!Cb>>q zJ~@I|JUW*eoO=Jk6<7{Bex(XAm5G*#7lY*p5n?cC#GVyYCGKkqDTtlcVuRp&QPk%} zK1-Xjfb~g1t%xZm!%g!E3qnkBbW#%40Wz05*+skx)#=i|Ne#YP;wQvRfBRC<>7|CJ z5`pbcqY;Dtu&!vM@DQ^LG=YQGUJ`@=ATv#qYVv9@oTw#{qqhZZ8$tl7h5)QMBkEbxo!NhvquE(6EDsW#-URTRH zKvmk%2;{xf34pHTKpyV{fwxAvr^1I~s3!rYJ@caKp)B80sn{m(SnO^Or_!t69nlZ? 
zSTL5N%+|@iR*-W&upr$7uxum5m>XhjFAh7GO8tr2l_DP_7c|JB043`|%^9M7?;yt9 z!C83*sXasisKFrC(C&Nix;NCX5Ys93eYnv#Fc814#+{Py-uly02x(?loz};UiQV4q zf~Kcj*!p`H^DmSe&_fT^@o|!--0kXG%=rd9asHu~nXFeN2{S+=s~H-ZA?kaF*LCLU z%FGnJ4x-KxVjQFfs`EZMt*eVMTgN7|1LqC&r4{3jh2}Bz*7LHqxJ?Fi^93OI8Aw2y zU@psR_}y2;ntT#9D9dJ@Y;p54YF7vF`{$irJFtteoG_bBb_TDhUt_IPAXkk9UhRqt zhqeQFg9kUhVVkls2z7mKnDymu9==$tB^K>#3q?n7Yw>DG&4qZvDl=&fMKTA)4!zZ(jSrwngr4GcvMPKy>4^RRm2$2y}P2FUV-KC zA(2CI+qqbM>tw&z2P;l8Ky3E>zWEo$&|6SCD0(pnbB<*H;zaHKMg9s7R+g~6+TJz< zgBcT@Ujp0NO^>0!g8M_Ns^P+M>#`_&_<@3emwv{vtxASlb_ZdG{c-X=p3%pXCd-;S zBD#Hhc7sy?IZM*^}KMRbi zEQVeO_O9hV1g|@)kXNrVooYUJd*ZY=yXF0{`Ak2EKA1g@uSk_b=+3CA|OE$mXoW2;Q5?sNc ziwZai->F-H8I8Z&^)-EWgcI(^m72qT5HJnOL{$Mwo5B|15Nui#G3J2+p_r(b#gG~q z869vF%GVwFC$96pE7UES$vrkuY;;TviiCQ<_pRpxn%`5Hs) zGHOC^Ktv#qIx)m~RZg~==dYR_UH6Hu>oIrV zNv>us=oZuP{?y9CJJA>owN`4G+2@rz!`dsAkcsqT#8-J+h$MGF0sc^9!}H3HvY2h$jbmWIjq7UsrN zg#h|0og?Ehk^W*zq9|Rob89?gUb$RhJNeKN!mvO7d~uN^nD)sbmO*9*l)c-&>=dmx ze`%(VZ*AfqWcO|R0}gRFi<CTASEy-z9n~KH7?-(2eR-3i(U4G5}f}k>sEu6^d0LO z@TKLr$>;UlZTB480OU%B@yI@#HHH+#eKnzA&rv3rq^D2`f*g#YrxTXm`D;B@`J)8| zqv&ODS4o070x&mNNN0B5bgs$v?P%Awk>?3Xf1R8Q!Nd3B>epC?0N4CS3kIvePe&|D z?Q(rlgOdD^kyYS*8aXTosgtQqgnR|{JX1gO+6x-N;dZJ9y7R~T@0da+E4xk}nJL~S zUFTUzg6;6estQ#_tY~!k==sZWRD9#Ks$X7wT|bt-QK<50Lb(^S>*h?B6`UFRquis7 ziw#OtF*xdJrY=87GqWG4J6K@O;pp+_S|Yd3WujYk%NHZyyqJLx#|?@q5EGwC3uVnw~TNq2`?Z37Q$f8Al`7}4Ypj_tjx=Xb2p7_{WAoC zY=Jx~EPt^_D$c7;j$DO76pJvyhUN2AXwd;dlZ#eTfuxZ`sJY5zMWygDHF43NWa>AD zpw^v0ieu}Af;yLIPa?G$wJNRVvbp2Mv9xrvo-NIwGtc<M5fU70TKHdmB9XKo}%Hy3S#4el6t|mk77VkNW=X6vjH$v6o zWR>XZ@)&n>DTc77buGGiP(snv#3JL~rSIqGdhYkaQ_=K6_tCwd7rplr-|Z{rL+lKhOOLQ@sqz%@wnfM6PBYlYmSxn zj~O*cR6({Xw5_^#1;hd`MSmaGXC<}3t*0$?d8irhT_AA(gLcG`v-NGSOYxb#51b?u zPmpFk3%Y4FJlccE9%^r2#Z=IbfY_y&<^HUvCB*6>6nQ~Bb{Ph<%8qe2fa)y$((|VE zKwbcHrHpV+uj%r+UMO&p$HRrS7-j~*4-F*v)8m~O`6(uHuXvfP@diuyCGG}z5N+J zkEW+!n}UFj7QD>UOMrLFsy)lgn|8VzR2!>WVE)n%7UGp@lyy~kLhY?dL#|I>f`rDy 
ziqw2zS9*6MW_2iH%rT1^S;j@@XW#VxB;Zh#TeN=fDh1O&lbx{^WNRX9Mmk6&{(Lb;yQXq`Y(-Cd*H_OYag5V{n(CU7s5(C} z>v|^P)QISacw-;fuRN=pN3DLj@4RjFnvT5E@2Z`>IYgtsZ)>5l(+`(E(H$2qcU;GA z;$@}~m8=-|zsQO4LYdQ*X!jeu!AdI)BPq|}c&MxL;dNdOZMp?&&;U&|hr=HHSbA53 z8Z27=-cJ01FsZ&ceimaTT8{>EISF&;23Mvn*xUQxR_1<7D zu>I(?^gc{}KIMTs`L|$Vh*ISwIT!Gd^eO(b_hYMjO=@br&SZWr0&vP`j)2l{_ zToc_NJef2Rgt2OSAuMy`>q(S)e7$Z%?RoNebil`$BCE@cbN2)kPm7KxG)lT|FIDF0 z>bMU{)e8HJj_#r!p>y?snw%v&xE2YiCsV@-F*j7fM+z>TSSk2;ev)bUQ7W~>CV89g z-7R-GgDY#B{yAt+sfX&-A$rsG&!JukrC<+ozCd+ghVxgcDW%)uM%}nM!+^hJ3LeU# z)YS&5fjrvk%0KOQb#8^N8GoXu-JI&L5AdrhB@4=ERuaDUY-}|~pijU~JFyk)bkDqX1Gr#_2^ZQa{N7Ni~ zS@9fs5;K$v*=8@hbj`FYHIJcWT1GPBgYQmG+kwo-F($^Q+R=z^I8lsT*T38rA9Ta- z283LSU1OccQ-8s|wb$wrYJ>zRBmN=rwu^xzOuUelanot=Ib*vME5H})88lR_zw25& zdt!JQ+Ai<06jCx8^XkwB;z%drTc2SXdjp(u$d7qlwj8iLnt4TAGd_t`E$8E4@+r-m zxIv8`>SG>lt^u;uv58W0dD}9;8z8^HK)nfM5A)Z&k(Z=ds;E4AjTLCOeCPyFtpX#=qshGU!lz=bXR z0%8frQ#+!Q7y*Vqu=H}NUF=lY@okB%rC8<=OtCS=;|_$uAXpYbNTM#T97DYgwGH+T znMo}7B-=XhVmQITp+eDch1mx>MYCdiYEicClNY0G^`Jtyp10dYhiHm)V6yh5RH_dz zhAC1;dM}DF52Bj7J+Vj@g`oTpcCg)$*DjOe-bPYtE_W=t6X~&Qur;Z`f!O*rcGm!K z^91E?G;KZBGm+X!X&Uhl?J(K?*4<~e=`rdbx<{-q38-_G4}Yri91<|w+-+E6c7XQP zP67N1|Isy|uFitsJGww-0OZ>A4aB`KQsO?I+5C`#&8i-D(}-W~tG-)FjUKf^h*new zZyi27^{AqGUDyVf0Cy4#S(&P=Y+)Bq00g(iV8=zQv$18Bu6ENO|3H#y2$d@Atb95n zw>J{Q12g7`LkFlAF0bcz?|G@k)$7Kn`c6LVCgfKY8;=w_Pi_sEY+vszb+O{rK+&_{ zMuXIcCcMo)T)&-Q)&r@+1I7~`_sMN5 zqICCoqq2!?G5PZj`4BTJtP7<1{^rFv=T+v`nRR<sNL#iqCF|~L4$yE z_xz*aGPU3j-BQJq|hT9v7> z1k(QZPl&-)M#|Pw&5p?Oz!B1(od|c-kuXSkR9&mNXMe)rD4>GG#&>0QOwPT0TR6a{ zscAg8yt#s;@f&N2`3(Q|!1V-JJ7 z^#8vP{3!)e@?F)zNfMnP6W~&LU|1u$c*8iqY27AXrIq}+RIlg<|L3TMkwSUhq{2WZ z=VtTXfJfCW;thS~ii1?ro6QD496hDG8^hzD^RUR6IYxq{Q z*6`{t=QNG#^aU9w{Ku~ZYma9n=>{^ffgXILNrmq|jeq1?g#v5js}h@Tu;oZJX3O)v zF5Ou6Nz{!{=rM|%BkEdxqIW!>p1Gj#C@<|p%2j_)S~I!g3PW@L=g0V8cQJZ|%}JRC zV*H1cy^-)8`Ks~QJ1|M-vt&^lZFS@zt=I-v``fknxSCQVK9~OZ=G3eD>ffb+o@P($ zy!0FN{5(@;_~CzkC*v=S)po`HD-E&Q|Kw0XXmYVl5GHo&YO~<@6KKYzOK0)Rr?_Pf 
z8=R23s|w2vXLX#6pz{*3P=4=X+4>IJJ5z1zzN~I?W^U41qHT_LUyJgt*?)`q2kUjU z9Lb?eea=?3cDHxFN7WVyg}lzn6Y8$Nu$DTXn;AdwYQnd6;wZ-}($B1xdYkabw#d?r zdvz_=v7Q+#y;SzdUmFd?l#_DT_`JQU9s9Voc>Z$}Au{Ku?hf2(XDKK&FbK*Fe<-AC zm7CdZabsCEUWLLe=p1dXS5EZIX(8;$$G?&z+Q(XEcH!te&!+bO%o~BfW2@tqf6E-qAh%N5pFW&0+oq^G;fpGO=igmJpOEIW2Fr z3MwCPueyV$-ZiZW-)ToW4V+jh5f0jUPX!IXI*X1%HVhJ7t!i2$ewKw|geI;Aj65XY^)iMLG z0tqF>;sUqr`N&&xk}I1jRsN5hlQ(-TUah?CDeY0e_!qLd4#>S5l@qguXj6ns zDcPI2)m2FDw?$W2q5MYSSFZ+a&y^b!&0dL`$pSC4mq6C;KYp&n15QO!_LysZs*TI9 z@i#Ew*6F<|-Z|>mv-oCNFOcB$ZWBFwWyN4`r88@9Q&?elAj@?{s z+Fq++urdjcFB9z+NK91OZm>yw-aFHtJvHuiv9(LgcVW9%Y6>rWH}V^2O+7X;Wq`9Q z=_=$5L<&^Gtep4}h}f?%-Qis%En9WkSk_zm@6)B>TLVd#E;|lMYyZb$VX(?{a4FeU zF+P{l*N&P0oJpTGpa-IKHqnc%S{!U;=}qOLo#g}T>;}mXQWEaBC7Z6_POPp$Z_Sm= zj?*w+JI>s%;$>2n@qPAN(V155^~OVreP+etH`BmNzXnBh-S*yTBxb{G8-v8@DQW^g zhi@F(cXjo6)f`U6>-Ja}vh*)QQoRj0`DL_M*#A0q2)JNln-n6hEaBAjz&V4J5{ZpW zO;N=CN6ch8|4ayISX4 zR&HM&PCUlvDlj8j#lLkfCw-#?T)CR}xq4XC09u6`pn=o;s`zlgN#chT zP<^8zfA0Sh*9#Btwp2)BT>IlL5uVt+&5`A97@HI;k=fo%^rz+PWNBb}%gY`Itp39? 
z+UV;1!eQS0Q$yBjBwtlX+_?IZfsRCH!fTJa-`c!Cb$<-+4>j{ncPh@;S?$8*&6g5u zb@I$4Mtg=Zrd4;xg)Uo-)p9ub8GgCdNt^Q9xUPnyAO2q+Lk@V1f|qIz-HX~@`FlV} z8Yr*wl^v(-D$$M3^v%w!)Fg0EyM&`R;t~|v+RCkbfQ`}WlD;p~U*x&_ZqM|}*uBl} zjZz;vE6DpJ%%=0+pH@z<#2T--CLa);fHmO@^I!MtMREiZ%)4Aw5UE-1wFduk3s$dJH@MbJRV4afijku%L2-rqI~Na4S*0J&#dSI^6Qw;y4Vz2kyTj|wVO-t?h?jSAAU7k5; zBFa5?Z$X^0+_jDR`KejmJ|?^%lwyFh&!Vosjh1iN zpAwF<;+k?@Vrv5W#I}G~Ztcs&vd3ZD{0_M#v7RV}>uMa@^9!N2*mwVBW}aws<9cAIog?C>|X0l9H^WBB(Wj> z-|Bqf2taFVR#k>N?+pINWG@HZIwLtvZuEEFl@qWEZBJsg;C=O*U91YRXdP?$wv6of z7;H~p$>a7I*sqGKgJWx_mx9Oe&aDTMQyew}IgMpF4fucC@iPV)egzVzdHGq78ogJu z3+uEmiABY(3~xwe#FTDVe30nirX%RQbY?fnb{+bIhD5Q@CnnZg66s1W=*hz1)crz%M6-- z%9Y;&zg2q}JMz0!A{}{XSQT{y#xuFdYZ5Q_q=OT-aFl%iSq9|X=;fSkmK)^r&YZw@q;q_=^oRTrPm<+;g>q5@bk1-}U`0vtDgP(UlT+>389%IH z-PbXo8#=a>7XZiC?>bD#`9=>T|Hb?D{tHx}hNMddlls3W?jV`oMH zgQb|nbY=yCSEb3U-k&rnctwq3e~*|c$mhQPt|1K?x1Qd6&{m30l2<{qok5HM<6Ygi z69cL7FDdlagsW}8*Gf@L`0TNH~Lqg9FXMmq9?Hj__hZ%U-0*!khO~U0 zoQ9_kkml{oDoLS}nmHYm_Gz6>gM9|6_uAlBk4}>{?_oyP2+CU`!~LqDXGG2AxC>2QS1~?p->Pwz&QxK6?dl-)Mu5XP<2}_SiH6a@ z86bmw1N)n7w;Ie{Alt1afHZVdUvbuXh{0bpO=f3X8;##0hPMW2iOWU2oU4QsXf;gi z)K3fVZ$?!535Vw)nFoV}cjt2nvP(hTDEXP3t1-;C5BJ`(Hcaj4?zsr}gRnct26G!v zl8P@eU9DksnGw8}-TGKf9vpW|f19GCTDq4@`h(@!>qj|coUVVFIr8b&NJ^|GiPBwM zAeKFyqD;-WW;*yQ(db%ka**LzXLVRaVs=1a0eJZiB`zL<)SRgiefpKKvz^%;__n$I zOkzw=NsoFo&5i$tyQGPoD49x#O^1K0fV4en{Js?uvMoRSewZWG}mQ4_xy=y$~wjW595#Ns*VI zw_uJb6FQ9dmjQ6llU}oU%4|uEVsc|ym0krOnd!41w7=18*M~cD#LX|YOsmm*bLgFu zg7-1%fJP4ijzE(!!#lB0k}oAzHM%U!9`qm6LF%q9u7xAMC=iIZ`@yh7p|JM ztf0D=C{$NWrg_Nw3Hlevq#W;2>x4OV3H{F7)YDR!S$&a#KE|O0ZyBw&__!Q-KCi@n>x}7lX zOO}I%oVt;qOy8)S+o&c`K>_dy22D6l$t+~AEvA3`=v2L>7+xDL`w z!d2Y`1h3bXr|T90yU}`AMMOEfKI_ZiKMD`t4k6e|O5Kbd3)Nkne|aM>y&7KRGoN%P zy&XU6#?zCWt{Oj~pP}wz=cPV< zeE9P5wv0f$qJa(f+IJXUsS8jOZ2;YOsZV)$(7)dfFj6`&d{@08+4XmvQ=p7;dnz8X zNTp7{l%qaJ{+pUin04-!kYrHbd6z@uwGdYg?Ddgu-wx7QvHjI^K3fyamQocSm$Efb)J{$48WTWBHy@{5@jUas)MYB-XZ_AEbU?{QYBwk2#uttjEw zk(p$}7f;7l4(Fvzzx9$sescL9YdEr_fPL`)zNSPA+z`<2lGN$-F(;OEyX-~3DMd(n 
z@g7AfX^!Z@FMC~@Exb*G>u$G(l^EZ2`B$GeKZN-@zL3yeR0K_oSQKzxc&xPBzYG~< z#_fO)YklECplX2b7hSRJ>;@a2Pf{kK-F>>(^FLxu= zm~WHaDk2LXZY_DVa>9OfJRMn`<7;>u#v64(7~ZLt<<3s|Yk}#r-Sr0fzE|?`6EoShz8b^huTB)w+LgCD{lmWB+}^to zb-@p{t(9zQv*b$5u27G1B^qsBOfShqe3L7y8~gEc22>cgi~W=+A{e1dZ99QfyI2fOM!`8GRE2HHpz!pCzFA&AKO_>aD4TPqa_ZS; zksqS!3paPC3GgyS50%#Kzbh5wsw%KUuw$aJ?1OyFnz2_zfD8m9x%zR5$T{yZyD84CdmvGRmruHU&Az z40(RVl+2dI$4XD2c_xmg?YYMCX_|^3s=~^@zxu&X=+8Xx<*V^(qjlciXqO@G-w3;U zO?np-fl{2!=}<)JjM*kEhpe;|cLv9j^SK}tCCLBoxE-RKzRNZp%}Ft|pO`Vk_N;ky z4_ADQuOpkLkujXol0jZ`DQ;=Tg(h(8+!!%|m<#3O`j^O8zFZg7-*y8q{nh#C=8-~; zucA@zX}DW>mP;&B@DqCFCTd;V$W5V_vlv$)WmqUhG@QQq zNn;KwR{LKec!#MeJ7>5d!9G_u_Df=m(gXhB6a)%*RkXA(P7bJTm+mm;f4F|F$4!0q zw{95KqW<7avtm)d%#pL9;KLg>;@aQJmAu$OseY(#o(YUTPC4k>)1Q~X(*sN{#Rcqo z0*qP{C!l1W>UVOGiK;gkf=D@-l@a3vX$5~L)+5z zfWDUOL~WqlJLH^*)za%aX@r0g&ngYQR4{SyLd+a*oi5GEIulQ}JL)=s=q?uVSw=eZ zB4yKzY=ucgrX4;gois?VE^7pD3Qv@qz7cUCkyqVfuJ!Ylb(Nl;mSFtG)!4h_e7vYs zGAYYXn4T`@%w!L@`~7y(ZZOVw&uY>E=E;Kg(EeVMC4KfPX)w5(kz(5eYHSR$L*viX z3~STPTg0`O?{IC5;p(!YEcnFjqwI$yL-pSZ$YO--v&UU|H2RyF*ZS@Ic4Ei$a!Mt6 zv1Br?4Al0G9VUyz&PLB#+_&Ah%<_2Jk+My^DyJB-N*6XuK~W^Du4I}esujHy zT2)eaO40R^^1MMOV7K-A{O5d?`e5~z#8?3LefHe8{zw&;N*S+zx{hrHG;#Wb4-3Al zrX5u5=JNaNePlc$R_Cui*~QXPIy{Z;i?zI9pFEXY@bJ9TfUpZ*p=JpR(!Cb^BS#Zi z8ThqCDa&lDWnA1$=Z4O;L|xWpp(chvKKM2WP+ z2Nz-tU-6GovpC>d)?KP`p_R-PTWbg@_1p}jEA*0!(fL9&~QS*aRB3 zFcME(I`8cd(hV3^=F7Vgc=nIooJ69|pDi@!AA}Ma@eFMEM;PW$$p0j(})gwz+O32 z{Q+dRn~;f959v1$y2k4D(UjPpp^&xgRTj4krAoh#k6ml|cw3~tDRfVlQQ&a{W%IMj z?n~uukC}fs!hp^*_I&9EYq!Zl^4oisHiVGn<}v3{d{;E1npv%Oz=@}~?3Q0nsxmwg z?y$(*h2U&dPc+81S9eXHVwnxP*CQM|m&LX(GgWT&B0gxpmnMv86j0nzqVR!xE@p+& z^P;V8fM_8b^R|lnGEmAp!O%zWpz1HSz~5)7<;~(bi8NGYU{1?J%$xyHRyck*eZ-n#=L8#1WG}0H_xC%C|7|&b<41`=2Gj!M zYV0yv*0sXm%p@7U{FOLLD}M23_|1#wjGc&p;b19!g#|&`DpyT{r4RD2KdA_;^kCT=)5+S;@EI3q_M*9M>*1TdnAXIFW7#y2ETNxw-)RW zuwVJ`^{;-Zhw1wgwHiIfONo6M4gs34LjG8?ep)DI$##>X;%YJ#3P803 zFue$^XN}%w6ov+6TWZ3pPf;IEmh{saFNHhc2hI0>u9(}`dXGqq9@5pA(vKWT?<$mC 
zcH&6@XvOmsKvK11hf#o!e65Xis9>BEk0AqEX;oU}CNL??>Y#L-lg;kNZ0<{Z!^g|% znT7-7!79cv1(1?dO_5^#4)pp$umuLJeb+MR0`E6ry4S`%1WGdifB5tDC0hGrz6$n9 z$Ic$jF%A-+4uuk+4@O!e*O%4#vOFQCo;B3-XLw+Mp6#4%{PIZ!<_RE6E^)4M@Tei7 z%y2QNC&j4J|9W`8shYljzKlOy01A@f?emycms$Q_ z@I&KW19&w2e^eT@-KbD`D1s)X?y1fKx?rU+r~-KR+l3nOfhaEKD4)<@w{ z>1SMw80<*ie)e9XMSR>R^TnqKSoeM)#|uZYk#}|dURRY8J)dRJxwQTBZF%B<4|Dq| zmNUOna8+dkbN~Dv`^38Yr*p5KJ{b-QJKC{h@fW)FRXpb&^8%XS{L9uos7vz0r*uv% zS&;PqJ6Tn7X@K)b|6I8hVVdlfI&oT8#H_!|y>x57vXOgIaJ zMSK8;21WRbMf+blTtXZm$3wXCQ>0VZ=)RaZk3;@#cP40;+vpi>SIa3CtS2K#>?Y@m zuCrZGN@_Tg9KK%aI@rn~ukp97r|AzlV%c{lpg3InPn+c=v-+>KBA!8U)4}O37-fe6 zYEDNh$G}B$-Hr~fX^JTr?fLl=nxpl#1f<-x-`_?nGVT1MUBlrc;Q`n6jD56Nkk?m) zjkCwh($~0}y`E+CGC!Hl@M&cac;dXW$K3VjuRMtXegy%31d=LXRi*xPfIL+^;yg0j z&=|RhTQ(7H=5(NTf!;#Zao{2Ix(8(ajJxgO-I;-cV(3xSzgU38fy~d;-eHchkMaTO z%l!Q5JA0b$q+6rahFph5S+E@V@Q(`Ft4|JP|1*HrlJ0FYso8}mR zD{XjMQ19$Hs+~M{nA)E(DB=ufpuP4o`|S1kzUihzSL+n&$mHp50Or9QaFso@nY_vG%2hjGB4wU zOkKHU6~;2n{x6ld7}3iBB47u%?%&wUoxM`KSvG;t!c3IWuI}3^;MviFZzOI2#JFC= z=Z_ixWsJ65rd&|cJ%31sMvBxY@l}5Bz48rCVvQwQXjwBkbI$g@p_vhw%f3;6A)1x7VAwn zVALL{71W45OG?RW^5f^CZ*2Q~dxNnQpYJ7XB0MQd&RO{}hx7*gg+5OAe!_RbsLfW@ zqPRH~tx?K@njpWE4#Rv1c`BE{NT&I)f_py=ZoC7{2q5o{sdMF&WEKfLWDB#oin->v zNT#m^hDW(Pxjkljjq@8$p(xvjmiqcVW|XmOs!L16a9xqfZ1lk;S8ml%<7mw{67bcs zNvX2t_`ZJ|AoKl%PE2zt1E*E3CTBHqhn20NB)UKM`Z)7;uHn_my!5b0qkI6v6C>%3%Q~QLU566ezuP$e50rO^jOu6Q>*N6 zRnQyn3^rGPF_pG{Q^6YrP-eUIASni=Cs%->U+7^~e2nT7v%dfPysch1C(#l;2+6dl zS|hG%P1R7Af7hcRQk7-mb?kqk}@z4YZYWWD}IPRzv}X&n)I@5(1S%#su%XbRgz=frI-WC*HoJ!rqmKVqHt`R-1!}-6G~oT~_H%xiR3H6TcVVOf zM`o*m9uzO;J$!~&?X5!E=IDvnSha)K3K9MCMeoVebC<;FMY9SCSJUa$(D@Vk-BS!{ zhwr+fO%*tGSx2Ts(ZlM^70sDyN2Z34D?r9OL8E2AB9F6}MNc9{nxIxkAHUpse)K_! 
zX6U0B!6JI{L)t~7j0Yc3 zf7Qyxic-!ZRio=uj1PG<-tT7V)WhCyQBAl;%^%x-!B>jvC#7x{COaFj@n18VlwibW zaF*{I?m%yP63-95yu3DHN6@`=Wadf<1XVd<2dxqg8_Mvmz(k7aF7BYkRGx%*v16E$ z0E6QYSStOj$wox~B`rXh=-DRY{36g(+VR7rjMP#EvX=IzPqu?w1W2iERir+br3Fgq z{9XcINi_w`BDHqeYa#bdPL8p;C@Db@@!)o2rH{+D(Bi^GpP~!>E=3ITh(5zVQitm3 z(76;l(P-q0^Kka2*@5+)sC&;In*tnHV7|>f!B)PU6Rdh62fo`7t`faX`c>aQ`MWp zOEXPJuL?->PvKGPWQT~AG}g|9I`{FRnCvYYybT=TMFF{SLRZYOxro>3JitxGg#h31 z=$YN^a{#5nMZWgh=VJI&SxuTeT4hJEAcfeNg(ayQ68-Yk5}^1j7*VN_eX)f0n`=n^ zAm_4{{(yzl(uf=n`1)5RQOqHlC194mrewqg%cuGlEHIg4I|3~djSSqT60asrsfv1) zkdPPqKyTpl;{WgCl;ru9B;qK+sBkg`S@kdwSI4tw082g+3FTv!J^s6^*~z6+KIr-D#Y25?FH(J zh9K;Y&>GU+K$EsOssM5K(%YlmRs!2jdG@$ks*%u9e(yF}rcZn7+E_I{1;${WU@1$d zl>o5Kl~-|j@@0A0{d2h^>b8?@n>HO4y_a;agnesIx3=hs;1(E#g?V(X@KXW=9Fq58 z556XKCm?s5mzg5k0XCXkzl+=u!2Agt)1T-FgDu_#oH^4;`fA&qH{HR`sVMWiKhC@e zbyB(E5Uvkc&kb3N(XZF=II&HLKDh*l9Sv_Ur^Acz3cR%XWcB8-Cz&Ap3F-ZGnG;Pl zpsNAQ6tPmU6quT<_qi-BZ4)MN2}i+nM1PcW$@KNY$n=1J)t!#;YoPLre#d8MfC)eo zjcY5v;c%!;#|%Va_W^T^Zw&)MdN5y;buq*uuPlv$m5SFa{$!Dy?cUJFwk47 zg1Ls~XR667_VpHbP#eM5Ec-rRSVES7t@`S1LFH!2eQOi_F-~3GUhWQ|q6gHqy2|s~ zojO%A1P$u)nI?8z>7f0c`t8%B9#1~~zIX$(_fpv&IOrf@Y^uPB?1Y*mx%_&?)e{vk z*IW6ScOMsgIIl$o+<>(=4-MAeb?xaf0p(X)mEE7OUOF$PO%NZa2bthKOGj^xsG~b` z&p(lUt%D?vJ3Yl`%tO#GfT=Wo)6*lz<6aXs4%mM?Gs+2@yzio5+TY0tTbJo-0Bvu! 
z6D-{fqp<2^Mg2aMb;o7#kDKVy%%WC(dF&h_OnzM1>+^+g$GtLsC=t8|z^nQfnHym; z9*wAS>QnOG=^eqKPcVnMLm%>9yWlQFv;@??v3ng9n7Yk!%1;g*&vmE;L*fW(J%yW{ zByi_80?DhT%{OwB6^(4_fAY%j-ft&L1jf|PKoL_uE{jdsn%6+~EuZ_kC2~=qxdWg} zk*R*!lp5|=l+_Lap*b}%vUb4G{Z$Z)mo}M%zngb{)=xelu3%$;9 zV%1Ht`Q0v(F7a25=Kz9-&EEO>TBRrcIzKnV!#;K^U#QzZlTLtwW}Y)HK7fZP64Jbs zdyow0L;ybbWO38#p2yjj=W`DU8APS|&Isylk*r+u7~ zI4Vgfa@H@}jeR%OnN6U}P$=b8=(#p+TP1qlHq8k^9r+pc&q$krM6sf0MTG?F%D$YddZ`1wVg& zcNiiX1_&G&aw4ky-DM&k1_zbeAMTeRlTQl@Bv(MpYO7PLVtaHTqPkw$X=KsKK$t;Q z9bBrH<=?Dm3~x=N#L~I-@r&hIS*>UotS50xNvvgs8*e62tFx2VN!V4le|p6L-5RRY zufyZ#66FrOKQKUwQg03|&16AU0&4e8kow$y<5ggHG5Aea@D10<^*@&r%ms}XTtdn* z+y{I;SCqD6qT<%78(5+kEU{NcF6}X#kU8boP!r%6A~HW^@#e19AbbNEa?8s?O&TvF zUs#a?>jT;M!grVQ)E+I?N8&&q{%)7Wq)Mt#dRhCLiN{{-Y(OHECC8`%yui_Yceg4n zDB^pGmLjM9m64~$N?g-Cg(2%iSP~nQu+1@Vr5*?3 z&nguwuvmrT5eqb1*4~E^odH_Qa*ul)S#7qHGa#J}l2i$Y!B|SvylhbAZ868b9_s<4 z8UB8vvq6+zi!3od-t?eyyq|t+aDgyrCG))a1R%0CL_U-681jG%UbF%R#iOe&@+Ydk zer4}OTuhndwxv4v^1b7k)_5Oz1c^HG@rfvq^N>tCuob#=93M6QR-&f0cS5-NfZYd< zLT9dOrh@;!>Z2RrwuEboAHv6b|2k1E@;a>e4Slr{@Y}Jf_1J6U5e?m*{+1r_TJD(( zOg~uqkuCgz+lzl;{8B+i)f6~!$*}%lNfF+3>}p8q;CHCqk4Ym9J|TLq%%#%{1kegg z>f7$euD>-^%p>*SJvp=(wYqPGJdhvzT6*jrX4y@>4=psu;s81mhVKT1mE;j(OKG zh`Pqx?o`*`aw{mv+D@G3GvZHtgNq6f{ttwzylL-$tA&2{3qc!aTK(a#-b&TJZTU10 znX9Oi6V4G+pYDBL8a_xDH?akPs%f3@3bf2(Y<-XQhgQUH+QDO7FPTAu0v=B9Ut$@|0uo4WN+-`=rD438Xlb{ zebGhJvLYT0m*nwOl%YI`)Mq1+0z5oQj{In`u=0L;bcK|H9{75b>S^zltLq41EJXK%i)J5Dd`65_lYuE>ZYNLeqQ_Dg~d%tZxE`?&>b zJ=j8oX<5k=OFad{sulY(mu8FG*KJJE^Gm0fK+gI-;s)Z?zwcoz57=4X_=@$|&%rxr zW#H4wLS-e)}Noc+ly9!z!Ulwlpaeg1s7ZrurBFq z9r4dEf9Fw)1UAb18`d7wk?%B~97W>q?tQGV3C3oX`OWoot{STUZg|C7+W*bCmClt9 zzWqh!tKWZNXd}U?d!p3tBZK?djH~OXIIE%shF#>fA3T)shJIH4!U)WBr^`s?*AeL5QE(VEsD=%v36j8S7e)>>c!u%lx4a!`-yaT7JzKM2hhHy1%u!I+SmnKB3@$^8{7yFFTA>8q)S)cQ-qGI@_4}RseTk%GYrvs zsQ&q{ZfSc0RBf;zZfzOCOR8S^fSJuTccoC+J-W#}NpABe!Gn7kRj1)I$&jMUW?d@c z4njAk;es>$+(tDRXEk^!*_l4v#B^n`-{j?N0sQ_G?8Lj2M$UHL5Wcw z4K7zI?dt#SP`V2J*&R!0$y>WmNl-7SkAuPC=>w?4*LRCrekIgc>ZHdFR?`z2{P^E4 
zZSiQgcb@^9xa+xSwatWBnV`t{1X|YC-p8LFGwuBbdvQsc^v#XS)O~f8H@4?Jmxfk~ zY#PH14eA_2WF8gnb(^gL31E7Xe+2psyJ#!qGNd=^@U6tzkd3ihMAWDv<8GR>t?TEpX)t*X(z@uxNAP$jngp`=#PpgEu86DB2Cg3?lt#W`wlVjKq$ z5k(ijt*ayz*@qt}o~{A8Q8}laIr99sXGM6~F8{(agA6zhzWUQ3(z&l+wHdR~G+&^+ z1)um&g7v+AB>$5vVQIb1|l&3Kuf*M@qKux46N zpq6M39_0)>YF(?ea#n5?6ic2Ad~Hy1BXQOeT-Mg2s4kel&!)-KEPPhy-Uns(!rh-y z?*l{U`3*np6jgV9Znx9DNB@g|z@9_I-00)ixqSKhe0XX}-@BcMrle6zl3beo%Rab1e;kg@mE6tH?*(Q4bSU)atP+7#B!f)18c{UyFur->2 zVcUxz{?$JI_nB&G%KRDqt?0^r%iMjj^FEg7_lBXjN+{lI_2xi zZ|n_w*>JOtTCeJ`iE~xa_bOd`mIbCf3qYT*JwOwo8_n`j;ojlC%(oZfd!%rVTN8fq zo!J}uymyMT*66X>2whFHHeJ30=KVG_vH@vNJT7C0eY?Z($(QkQT8jj&L352KI|-~g zbKOvib7UEXy}qZQxsV%shfKI>^7MA>_e2XKK>bDOaFq+ui{0mm{yS)I%OWE^J9hk^ z0-WP+ad?ti4dTqz78m0PdeF0Ug>ExRG&VPcfwpIPDiu}u1h@hEQ=SP4OjS7qZ^Q#xTUBfaaY`z zsG|w$QFrd(ZSf7>DjA!>TIhZ|f(%cNt-{=a9{-gf3K;Fn*mEDSgLbOgkh#; z>N>sr!7!bgM-37E)2wlR{TnC_xHGM#y^SZFK}&gL6NRl?o$Tx;y-4zA=2Ch@x|if0 z82o*V{`yx}TwW@8?Z1>j1NUQAAI>yUp}MOiJuSlbfNXA}NX4|t|3UMRvp~^<7F{r_ zf8+I(8)yf)?Dtjr<5};=R^6J9_(6n7KE^Tv%zD|GYhmx<(gSkTsi?qv)D5`JR|n1- z=$Z+5R8`Y@!~@js=1+cys1aJV*WO#;w^R(@=8A8rdML0qHK%SYE29U^HQVQ11>I2S z#_|$=vkY+v%%GwKL_O+EvmrkU-u13+z=eotAlxC@ zkIP{g;krUrvlXO3uvk{@!Uz%`WlD-FG*n`uH>KJF2%Ihe^SNlEw`inEj7B=?VBVSqH`27ewnW`~Q4pG$R{m?ggQl$}fQ&1D+H57|KuohRoC2sM{tL0<*42G_%y2ln=)9oPp)$u{4p^g-iYAEU^Yi14b*X3ZOVjkqfEWz59-=2tN~tC%imn={E{wpj8x186NDhKQQC_{f1XLHL>dJlYyBNFrx0I z#I=F$)2^m0Pwz^;%^Gz!T(lw{zj|d$@+L8wM253|p&Sgq-m~owi5B?~aq9pl4&4%-xTlWW~t6Y9$I(?0v7N z4C;vEgr>5Xh&e?-*PZorp@RYX)5qP;mOz`X6({@1&3K_IP9mOX!jGVrJ#}AQTXxPD z9F<%>QE<}&)Ktc*Nn+-Xe$x+oU%?!F3W$|CzCQ7W!#h zCCF(5XMorMZMjaFZrqv$n+O9cDJwjJ00(W+u7LVG`IWFScK1953c!z3t?q7?1UM%)x*AbCz5uU#h+6pDq9DH*%>^KOG2 zqJb23rU+OJmN&5rFlsFsr14~RpQ4xktN~T&i~_+EfU@Yid7=6vzUt-&$+MOAuw2x} zBAdkZ&%A0<;Qn?pMEtwsq0nMC(cZ{w1S1i!PfYfO5g1HQqr5RlYrN-O0U~2mFD3yx=gP%~EbgKFORrdw3iS z4Fbf=4IBov-Ixgg9dDglwn8Tfx)O17fDmJ{vvJhp-BF=?WQvEYLyb7f-yv^#HX(Cr zvRM&EuNaFc8|RFx5&;X2I@+PwHUiNP@(z2KD2_v9#r8g6KQN_#Kkm1FTr335B@+9q 
z#Vpyy@`)xhoC+0H!rzm|eyJ+%ggz2AZQ9JcK_2ZLyKM!6jk)jlEh!K z*w5v4F2UQe`m2x2GZwu8jjRdGX&W72-`gOgBj35?z%%BjBlbuZ9s<%r#76e-=f9^T zwh!znd@2Xy4TEW!g}gtHX6*rGfzyfL?x{%!9;?VN&$FXYrvE&RHytu~-L85*vT30~ z3Aw=x=xO_|rqvXP9`^E_u`ZZx3Z@lLyO^~xI*e*Tw=7egIO1tbxKwSHo}K*aWJMj6 zQvh1`z~6zHn^U0ahjY`*c;XqFX7`6I7>yUdmzM<&9=xr(lp8vt}sps9?m-1^fJ?K7~-6$lWw=x4NwQ zXmsMSX*0Zd?1gpSvKA>Msp=*(L$AM+a$)8%i;*!i4dD}g1$+W@skpo;2O^rHhy*cW zSf?}>NHg>rL_U8%Eguh0Ur_=7QQDVL<IzZ*`y} zifH=Sd=x`m$^ZO{0HW|;*2i5(9}piLnSA4N@<=9>oM+}aX%t%Au~TX}i!2#9ESqb- z5s2}+j|mK2%D7X%h_6whH0vN{#JI98m{=6h6V);6E6@S1ijFVN+^RBSyL~oI2H;6x#-LTvPTq&Q zV#w5_=|$edx-47aFkm+>_?wIuT)E<;wDt>eDOrlvGwQ1*mgeC5JDPb&!38r-H`=x!Kf<8t8=C04@dITvz4-J#g9O1(RZX0hhZ|0o z+`5wCiiR8#4R%Bnr;MFt>oP}GwVeF~&t9n-UM)e7cb~CEc$9FWaWo(7T-GL9PidOz z=QJMt@k3X7n7E~&2?3Py_j6qTjIr$*@V%WlPO9-yrn!cdI!Wu&5z`d*Gh`^5FBc1V zA~KjoGV6W?D;F{77jqX<1tOvu_WRQl-q+7Msq2(HmNl847`W;d_VMCogwOkfxa)&Y zLM zVl)2}r=*cO((Lo`rLu0}5@|&Ty;x?MQTH>WwGC-e!x;=KKA*2=5WS z-MwI7Wk$71WM?X;RCPu&QPLI}HFkp8NmV9uQE~1-HizL#b3^$z4HZyG4#Jj^$l+!a zQuFbEWYdtjUbqrU2MAlJ@DIXDjL-f@nKDUuZZ7B55wta)H=tR|7?jiNi|njfDOktu z04i=SS~^m%Bl2lCL^H}ArDgj!Q{cjG*oW{+s(uUyyL_p?YW)po!mBfB7(ry8Gs8is zIbmxvirxgZT^5>7n&=gCLVGL&SLySBT^&d(Vx?*!{hI~~MCN(kl{n0={6hHpV$_9I z-TEkS9^Fs=@mWj~@&_jOkIT{$Ypmj7L=b7Q5T4$rZ%Wnn7vS*?deqqiE{-($Bk11W zL}R-rn&6um&z%TsYtzk7EpTg|1V!iBTC<_xu{!wUgl>}+pVa4v*IRcsK0oC)1FN@M zBIaUG6F8FihEr6FleGEr{(!(o&L@3CsOHo=QTPhxR1ve%jAN*S3iS)*Ge*ZqB6D3z zmTi9$vPQ?vQlNDnxnMdT;Tc9v7uTc-DxkFS_d`$`&{hF2^41VV%b3)F!kY?`R^8Trc9E zR9{$pv_=cLy57xGUHp9@eA<*?Xw7ZM$(7j3DA&+0XcvCYHI3gD%-B&P~p&pQ%4aO}e@(4VeWMw8^)Q_Bkc` zq|{4yEia``d(Mt)>uFIX@y?!LB*op-21>4RX}L`hj=!w}mm-$m_97@Z{r#xsod7ce zv?t#7rZD}OCMT%DHr!8<+Bzjn{Vp!1=(mc%wdb5(zjGM;)y<;2mfb*C$vd))$`~s> zgm3B#^%pmkgD%ouwOeVXBZHfk+s+ELb?+|x{oDWgxAgnOtLvL}H$F}oTS5LMG4y1q zXSb#~m5X0cMQ3!%5S@|F%(Py;xl&=$8zfZhKA2O?fIH8OZM(37BF?hsz^=F&pndR) zC*qFSBQBWZj#f2ozK&=Q+??o5b2|u@XgMf|5N2xsZ;avNfB(lrNXRlbjH873(Z`-) 
zcp+22ZMx6pSY8bjf{asNe*`G&rI)Cj=iR4jvV;DWB=oWik59|h?Ay-*dG+=PePDXR2d(-Bjeq-$=+HUZP!?8s6&;5FH`v@y;ej0;P% zY2dHhe@837;Ah7hL+w+$k!Ib*1xj@dbz@Jukw4|ctz3o;2|dVwD5k(2R^Ke|I**=x zcgNMg!-vsCd7hIqu$lJA=W!c*fE{HPae=g`WD$i~#wtEu>=duz81qjS=p$@3>{w5d ztMIAvc%(r$s`m+dQq4d=(s5TNNkGLBQE>T5U!R|2BL8!3(Dz(d!?aABPEES0OzzDs zQ!lm+r@1Co70o^`@JxK+rGDJ&x`Up<1QQdioq4 zYE3~RYB(R0tOGw-&0d+Tdww`s!Sw@rf9e^B9t~PpANqzl`0CU-Rl&yrwwAeIwTdAv zeV#IDyu2uxD~1+j6aL|{N;D;|BaW7s9ogiCrU&J!CWHNYZ%5F8mB#eXM7t62qwwUr;k5Zwj2=}nwQ+&6_}hz{ zG}Y!EZaYz0>FK%X(>AVG@Z7*B7K+NEiK{c4mL2VVocoe87`4&w!GbL{KmUUVynSRZW@Xq6(i?=veBRF zoh!I@HKayVHhVH}b+iKXaQ^3o>EA7Vd`(x@Mf#gS`Z-w@a9)t*>JL2XVXmU%2aS=q zMQ+EO1uG>`zy1`>)@G*Pw!`$2AWfNd#F=-Ts)k?b1(jv|Og}?QWA{u1Kf*oCR~Oup z|JUBNKQ(pbaXQ=8aaLh>8CNJGvuj8j%1a=IqQD{*8uFlpF36)mc}4O<39s-FcBwG# z=rX+tP~JmB0#OnLge;E$)+!qyt(XuXgjd0!P(TwPMj-6HahXj!_fO#daFh9F?m6G{ zJ?HzmpYuH@=UW))Zwh3A&7>fx8!W~_pD$o9?qZ3#XPQD;B&Uk#tG8K$x8BXQN@Xu< z#b@p6`TFlitZ!)Z#%`S5?Df~~ax#+=CPj=SMwJ(sZmoK8hn;p6XewVsVSkV=m*kZL zIk*ryae0X>J;6$g3Ry>W%(y#n4$tH#YX4aaIV|!boGPW?8^AG%l9m9`X*|Uh(3R{Q z&oUpUvFe=W|E$BH$2TcL@B}65-Q?Df7I2Yxnv3U^M749zLrizhnt^9Mk}30W?}&Os zplx^X!mBr`l_U4LnWyru5Tn2|^W{oKS)+3&x5a~I%}(U59zDeQEHyp5Vd)(x--9Oy z*1JF1eM^2~qJUXX*2;tu`C}AWv%^K^20nUBbsgsGFZ8R6W2B^QbnF^&O#A3-uOXI& z^Tt|hC|IZ&^a9B;l45hyW!N5Qwmc93@_DFyk;aKL_8f%v>%S{N0!3ZcSIWqV?_W2~P#J&g+ zbK)0}b0GT>L}|k{3Mo#>`$2E`beL;S-z?(p)~Nj+Cv%P80bFe?^l-~?e10i0paEW%GyhqG zIRw$vE%@9bu6_6NUrl#k?x2(7yJFFsVyk<0#<+HRL|S3dmJIQXoJ8u<($ymiV}w z(5Xc)D;o_z@wxLQvsrlzit&E}FIV9x#~l9kD6MITS2@E7l1u^7y`#eZSi75B)Z5s* z<>{2C1cPD;iOqk`aVpSe5ZoLkX+T*>do)m{iKCKUKGA7xZRCrDl&RTs+39NH0@TdH zl0Z24A}P_iVSOV0GXYBfj%*I!%rAMWcIt@GZ$Lg~3E^I0U;x~cviLofW`?a%Htu|z z;+lfq{1kglBzeeyF6P>>Cf%E-K zrCY1flQl$UBxi=pFat#-&HP=T!$g+)j?qp+Jy=@`D=`nHnTK4dbHYQ{5 z47a?g0;oXfUm+Ms_dj6l^>GBB^3Ispr!!v7wc8dh8<8C27}$h`0Yvz!PX|B6YYCm= zj7z%+R3InGZWe)J-;SLmCw(^Ahb8Y9+KWIUQ7T@4o$P0fDovQd*>ym)W!ZDcUCgVi z1%4N*z0cP4B3$roDMh#9Taop64PTz)A3_&$#bSlZ+e*Fw@sj6sqi(kdaHcx%1}ZnY 
z!s`*xh7zjSgX3&)St%D>*J>#3_@gEQuX%*PC>FI?I#z1iA=fuLA~yd%+Faw+`^084 z!#HwJ!qMn^-NL$_er)nKw!MR&ksHJaLuCXKIxE=I=*dvBn#`YKQHeuppxX&W0&_$p zsws!>yx=u%clg@B&#GyWv?7{-8VB?ss|L^4an+Xze*!dr1s1q+7A@^Bs6pUlu#($p z+amXJKi~UATWmz8OUHlJi;#N1xLgAg{y!hv|K%5ewl5s8GAV0c=vr(7y!*nNrf$D4 zOfaF|`?7-~_bmZ^9@x^<8V7^O)EWogV`7a1*f24Mnd{@d!@s{kNc~)g>4xOki^PwG I7vlc$U)Tb|djJ3c literal 0 HcmV?d00001 diff --git a/web/src/app/public/goalert-logo-scaled.png b/web/src/app/public/goalert-logo-scaled.png new file mode 100644 index 0000000000000000000000000000000000000000..48c29229df04e0093deec12bc24a891529108806 GIT binary patch literal 13636 zcmch;byS?owl9jiyK8V7*T&r)f=kmhgy1yNNRZ&}?t$P=kR(`e2@;$T+=5$hyR5bM zUhkYY&KvKJ`^W9kpH02Z}jkdmYmhXG$0 zI6*u>bTB7JXDARR!SFX<;LH7A#oP>Zf0KAPNHEC!C6LZYLz_;{#T`N?$Rz{-^9l&k z35#;^3h@hwigMEN@$iap^N4Ws@&kB$WRE$ih;g5iya zhbxeq+uPfl%bTCe#odmZS5#D#n}?5^j}P!d0f72Adw^g7XDH);ILJewV0U|04|^A9 zy1zJrtX(`kBp6;a{g(+&uK%!ghW;C-7r?k-AXjc)E}p+k`kT-O{12V0r@Q0d!fn9Z z5J!j;#MuM-Ld*LPt?L^X4;Sbgm;Vpe|5X1s0WZ*MX#6ANf7Hdv=^qhL4+XCmGyV<8 z|40qh_i=@A>q4L|p6+0Xg4c_hjDK0<3Y2q)fIJ}bFNXxfODBi|cmx5w0{XoCz`wo% zFONVTo_~>QxY*d+`uulNJ|2BuKA-?UkVp8xk-m6<4afuZ|4VEG2HLu~JAq#Gw|4^B zLAYI=?HK6(K?Nx1;^^Z3BKSo+{{LF8A}6Qq?qX~2_)-DYRg$GsQIHej6%`T!@Nx0} zt*(X!P{kSQ0dfXIROBTXUTo#Ex3>X`3fu5Oz=E~_YY2!JAOI5N1Bik^0ssMiVL@9V zQHZcLkKljW%e#O*|LW1d+W*VBZCt=FJpLc`gam~Jt$F#a0fM#=Fo4HaKm;Hv%m)J4 z3WFiS0w57yYyN+M_o9uKyZuXifgJxU*T1x~dEv-s!)GH55wHe;KsF)(0Z}k7KvdA$ z1^}|Q-7|8@+ZvOUzp#og!MaiIrs`}durJ>B2F00e>m^6?jBKwc0V zhJP2@{|{*VZ^`|4d+#@p7nc9=k^iCN|0NshV(Z}za)-#+y&(6W*w_9~6wt}Z{T1t? 
zbgUX6u)XtNp~3ba1;KAX&UTQO#KX%Hl+>23qstCNe0iM^FHFN33ILkm;=G@tKanoH2r3U%h7 z_)`+E7TrbWpg<-|;!8P2Xft-;vA8SOE^7CW?z3*0Zusu=vJ$%&ij+a-HJP=EZml4( z9ryDh^K1HM)jLjf8@AJ~;Q9Ga9FxeezC+7=9v;_%1nLV*&4ZsY{mFjk?J-1nEemIb ze20U7!i34)KAU%w1{WY)BZlZoDvvJZ%Ld2FekGj37)LW#Bv(Yw@AKR}(~1yUvs(Mu zecLTIx}+3zJ??RCCNf0d4UiVAhlf$SlExDDBA||GvSP^ZqeF(egLF8b0!O;ix~kAs z&iIxPSOmmCerq@HM$WWLKCz5c^hbUqI2B1x@{!FN%wK2f&Isx_Wmx#pO?Ziui;3#P z<|=o4(uz<}byzQ5)nm=^!29u6V%(xrp+;B-81!tkz#?R0pga-+B$+_`Mwraj`jIFa z;aVwvi9dd6D!vOPJwG=12ZQCr^_j2zmd98lQZLD-a_9@1TsPS?^=c0Gaa6lVOGqpxa0&|-0bNdZVmo4q`^Ea?DdV?oV#)Hc0y8P z2BM(QK8}r#;^N}EjXtq&Y;Sq5va}3QF&$duuJiyKw)79QC_NzIZ}hO-q3sNuaF@IJ z^`Pq(CUe8>!;$#VhhmwYZ*JWO=lIwpCPeAX;r-(=_F7Cutogl=zC>a<5_*Cnllf?a z;o_}X9c^Km+y}oNcH~GE589Dv1^$5tGsXT=Kckkb=rS>`}E|?8Ai><;s&wi0vw>F3??XF2Gm%4aXU8Z}c#{;QK+yOq z(g?pq8N6_8jUpiPpnobHreL2Kl*ZgIWN2LsNJ>7lNYytc2tA~=S=g{f5u`8Q|EMrb zP|vR1u+;D9!SvxA`ij*)^syHU*`Vb!+08w^D#X|FjjRb19a}5e!`9c{f!GaZaZ7Ax zL<>xR#2X4%-^=kmks-?93B&?~75IS3X3=kG?G4S{T72jZ1acuEwbJ(V?Z^B1f4Zo2 z@q7`saqYLE8pEe!SzD?7Q7dm>6;6l{B$0$ENdM?=L8(%yqEN(gME~`pjylTuK~*dD zos!@87U^4Stv)u)7^y{~wv_Q9rD`>-N5#hoeI9l!%Kq}n^7=ju2QbJ!t2p+1axv?g zHt?0TG2WTB?%Lo-c;~o?#YCcob?fN25(-+d7(CS>Yr;mC#P#3$gk@!>MlV2EtNnTN z{+1FiTERXwNKhu*BI9&S7Pnx+yJaLxbku`(R62rUkDc102 znQRc1A+hhRV`q#;i?SvbLLY0Tt94~o>n_RJLwPVw#={u;$8+TLtVkik(~Bv*Yi6Iu z?SORcMzpe06MKI6gr5xY_U~}#;kZ#X5d)B>V~h~65Jd~&$qd@Z8<=^`e(7#i#qr*= zpzKnKK;#CfF1y0{Tg`vE+&GkkcAv{@uSuB_bk1Zx6ohFW(lbHt+&K=i^Gc}e87FH| z8N7E9lkNjT#W$Bj*2TDJ&^{M3?u?*}?)u7JsbR5rldJ};xSsCGZ3Lk*M)aDt^Fa{U zqNmDb3s>fH5w040vW@Xn1wov+3^d9Z{+}nugfS(sUOn|D|mJeh#D||ZeiW< z+{57-;<75B>)qtXQw_q4#+&_9RigS{(}Ay!IhH2F71EF~dSNd9dgLB)KQ7wcGDDu6 z>$}HkU6$8tWcx2>-)H+JB{NPoil%Q*r_v(%-x1NV` z{MaV=aA(twP*?8)cNMaSzD3+U+x^@phSbn4Ip?auygoL@bk!)!j;Fm%4;EGK73-YO z3*f56%5qCNVmP3^sr@unSg*yomCGW8tcoCpxIMuJF<_w5CD1e@;>TkZK2~o4l5ice zGPRZ-?zhIun<^pS#cd$ThQ)?Rhl;C)_!dN^-;0?kuvURVSs^HCyt2FuWVh2qyuc(}^g!XNZvEt#w9cpgUAFw;eV*EfoBv2i&yKWcHZ%Ucei3%e+mmQ@*J*eSeaAkyxaZ{@ 
zkNUZaM`ZIBL*z20n5>sP%WEa~Xw63lv2wC5Js^X}gxMKByKj=29^0Bsd$jN;?~lO` zpH!7qt@J%(nK`Vh5!ga?JYy@t#enmES=`k+yzmNsH+Um00lRJB4a}Jw;cyPdo^-&` z?bD4Jw5qN|CxxsDH;0fzRX>FOHP+qMGzp|bVl0EW{v)+@(xTbHA!zKdyFHRocrzCGt7yB9c=? zMv1c?MN0O362}w}l|nS+NF|n4*;J@wM&%%68!cBg3H`usNPKsrT9mWS2O{H-n*n0# zry3h7u(}L+xxfCBrW|UNrX6~pjL@1E4x1ra+)n1*CVdF{%|ovi5*ZT0$*n2)10;*F zpMW-s5@MMeexn~CoG_8h^HxWL6`Ds>iOz(DRX6T2$qaw}it*3`opHMpL$wKslb7uZ zlRr1}t*t>Kgp>Ipe?9kurAu2*PRg@gLCv{VwN$n_jIQ=BIq6Hm(O+W4uRB= z(Un@L5$IL&yKC>Ablj^jwYmPDok_h;M4afbgjS<~EKjo^#6Py^P+Gz^HG4f4v)Tmp zuVS!2oc@>#g+!f>;P>h}zzqGq+d)4*;;@f|Z>rM!eGaPazL#RBD(TVCWiPQJz$EQv z#X@m~lp>M*d>=_I=KV4-ljr3DQ*90tC<5N1e-w=!%7Qasg7Cg}S$Vrvn)g;prbj;Eo;2FCpL69>|>`6ndjGW9#jWmA;uO6%Juf<0Qn)!o` ze9%_yBkD2%*q9h*1wDEjz@2vwzr3J%+evWDt#7a9aA0HRx*Cq0PdoNt5UU3BS*3+^^>c0@G{E+JPw|JSe|5bH3zOkK4Oi%B_n!nM0kLsB4!<$S-C{H zw0K0A~ zEE%4Q2rp?1!gDu1`_=r&;Ad*IKAxOYlsZ#Uul}$oF?qE2!=$Ic4>?D|m8-%?9H;9^ z)KnSHps1*Y?PTBm*|Bam7_eN8?mj*6*~Fd|jk6&x0uboNHOsH#-7?meWtE(mMIu%6~d>*0QZ@#fmf_L0*Qd`gIc(v3p##H;5ILC0q}a{yB@|$4-0mP~^(ped_XVORQKq|9S~B z`$6zbNN@+|=GIm#4hrpJu1-tEwh}J3`^-A`R_)kzQ*l*HJ(MlWiSqV0{sd^4&F-{u z;@x}f-j2iXoNF^Wy$vyH!U}U8{yx^qBb&kmAo;K%-{TZg?%I#b@^WfNRex|R4$-J8 zJ}YT=CNvgVqimgGtoXnfd{%Q${;Y;TgB3?;+cBD$67}<-8Is74zLh_a%tB9G&}oP7 z#nvJSbS}C2z-#B3fKfV`YrcwwMvagzr(RUZKB

QjfRWg! z5PKcI;jsHhDBgtR_zkmv`Iy<6g;n5Mtc1vQMW1fFQx{Eqzmn1~sb0bTj_SZZS(KXY zL5H6K@_=AEFrpvE=OPB?GOKh^N(7G>cWrBtDzYi30RS8Zoh(Js4;d~UqAOF{oDjJ% zk5xJj96y?bTtD{FpN;-SGh_58U8xMq)$Lm~535T7#WWnbmE$xUQ^#vT6xKHAne1wr@zVQ<~W%)vzvRL z2WS4Fy&bQ9%2(^^hKX2Shl)Kux8Aman4p`P_cO6ygU={Q506}DAe_^XRnW>YSH5T8 zdanO52ip!NkP#-AE^HOmO&eB~yv7>k(x17;zuBNU46# zjMoP1aCMvl96Q&`;11h&e4j4M!MA6^b=J>=-WR{?T$ml5iXx}>3&(~>k}=8)-O z6o$(u53RuE55R3Q;y16!)gX=@vYh#GrNJH=-T9Ttnq?Isn*OXF0N>NlGs!i4{}LJe zh4=|w(}aoA>i#9W`}hKDDtK|1a4=4HjENSix_v=bX!q&;6ps|^RfD*)#;EU3st&7k z0c#nlTs{4jhu7mJb4G>e>!a;C$byS;~(VOmp{%MN>;d(0GKxVBsQ zQjNDvcGK>s^nczRQ6@S?bSH|O5G&bJVNu0#<^Cxr=5~(>u|RAs0Zed_lbCQHBV(9< zYcuTFIMxB6Dw!w&^XReUs+-C9g$&!#-_%~*a3j;??OZzcLt{_oKG8;GB;4D}Qk$B}-cJOw4ZEm!iNUN;bYPT1H!%UB2K61P*_^i$PIQ0B&x>SS#0mg`tCo=U7; zPv^E`!Cv3OLY_mr223Zr#p*4?=Fe(To%<;1=Xe&mT96738t}N^t|0tDMI*VfEsDq} zHC16uEu~8lB~EoPA{z!#m^wo3vlCzL^)UFdmyW*%f!1@WIdrM4zl7jsQul*a7eY&uDsmca?tp}yG&*sQ> zYau%KwrT{zo+&$FJ;vQFz6VY5$UwAT{NfA^`>X{?9ulDF|LjBk)YU!A$($2)tozI%@om zBF3UMfPFx;N(F74X8ThEVCIoH-o?Z0esI!hLc!LJEEBh)$y=#NUOHEm{KHJBVLh#6 za~p}__&PB)IvAhZBmh#ZM|JPj4ilm@3vHc{qMWEH?H!KIIpDZqTB-1%Bt$Q%@LzTr zYowiCCxpFoWERJzCA)Df%@<*3UDK#FQl8BC&IxLet>%uO)=aKEZB z_=4K$C-g#Osixx)T5;StswSM1;e3p$Q)?wb8Wd;l=SR$TP!Yvo+DUU6jO6rNcFX-K zm5+o)tfO7yS#ii-0dhM`M}rD?Q(JDF9XB-&gnIyEEt0_d57BUTPnnqFM493YGZd;=m@alciXe$ zLFW=4RIT)nmC3C^#`+Q}X_ORC=52xuINxxl1Wh%!&nQQ~>E{(Np~ElR@SLKZ?{Q7Q z^gTDPUYszmsLgf??6#c(ccVYrT=1x__;9Z#WIC(5+CuNr`@NyF!)Qi9)r4S4#A4Q@5afohS1B69-p3lfcMGQJD@*C8`{SA3t^H=d@?Fo~XTon1 zA9*&oLW;LX6i4rpK?)49Lw$CfU*&%Dj)W`ked-zXlj~f`^POGc`n^Pbo{X<^j79k5 zxM9KKYpx18{idu|wvDYcxcaZ*Q6-*zHk+PyUytLN~< zjgX5EYCKPF5P8!|7UY{Fi(4rV-dx_XFEpx~j7X>Z@bY874sZlB+ zg3J$F{R=;d6gbUQ$~uTDZ3>fbGXgAhq^f^kj7F6HE=mU((=?0zKnEd|^+ZUR>83Sm|a*kinr$*Ky?ory)j)@*hWH?H#1bCMBq2ZOeh z%7s>7TZiTnzUR;U37CKvvJ~Qz?z9)CAhGxYv|wl*_P%C723Fh?ar(g!;wPN1H@j~? zk-tvUs#kC@M*jMpNdID%5Jnrh;Da&8-pZeSQTf*p0*FCL+Xv|w3JNpX_l;i)KdwR! 
zD_qR1o;(x`Q}6t>In$vjVpawf^b>wuMHg^I@HaycWCZ`*j$LOtHfz8Xjeww9M==xeWY z5l>gGQQwl6;yhTywo(~@rvg1X?J!3}%Y5F35OBIjbLNBAL zE2q71E=3phjU;-XmUdig$O|Kp;_v$|gRsO~~mONR!t8T=-G?Qrb&w<2pd?I2Sl;e#qJ z_V{m3!S41ibLA2IC-qLBFK8!RXB-sY3wIXMd88Hh8zA+~j7qh&j(;@dZO(gifNi@d z_^53v6zpy69(+ErYxF11a%3&<&?DR@roau zmpcm8iTRR-drcHQhwD)nVhl~hn~+i6ZYz5`LJ&C5D*&g)v{mH(tC|d?!4P<>!EWqj zQuf$0da=Iv6`>%&Z>J}dev^Yk2-x<9XHDXfz9ZN$e}@4P1*Q;S`jR$fjDAwR+gmK| z>AH^aAcrV0h$p`fID7=Wrsr**@1k@mur!A58R~Ln8it6cfa_n6O^zi2ooe`Vr-y8K zOJ5I2g$$327Ci~)S9VZy9|S_fpY)6Kjqc3F>A7L;znBdPw&?sy#wC^#m z8xuzA6f|UL$0OFt){uyWHk!$r%xd=~v3tLaXdXj8)IAvKJ%o4UKKRth&v~HMHzM8l zhi_hlrzm2}Deh%RxK5>>Ogpp-9>$mlS8HPRD%wMP($PBeVxDKnmI{nnUX6oTN@47h z9_t!{8Sf}Nk*2^+zh!F@ib$5u_vO$4T!;MYOI< zJZlkO2_(4^khUEL9EoLtb2&OYfG^Pe!5}f$~61lGR z72fC$^#f-p&n-#g!)XJDBYAABG&V2?op94&wbWI?{e!_rgp}tWIJH8};mv;0up~rE zcz+f&!smqlXp@|@f1%EtD9|Z%mZZ*&(~*#ch;(st$sS+v){kU**EUzXtj4z{Mo=AR zjJE$;v@~GI51$t5#ZE}>+0Sy`P4up!Ujn9P>R_-Gla()R5qiD*XA|J>y#^l7e>;Oa zOMGJ;AZ%y%^S1!L1XotFyzPX{NY`>va)DXE8-!FO8G!kQ~SI}x$uDDjM!vm5PZ&Qv%fH{ zvza1y%EiNAe4Yb0g0QhnT`TcyLwZn050)5K>N#(}tSp-*YZ}2%rIemaUD%icH7^@h z+;N4Po`aigfzHZe{+|vnne)xo781)Yw)(2MSB>Aw5Jj{~gy`edF1gM$_Xp7cJ%f35Avn9J?-!{ke{3#MzX=U>5(|l=z18vm!^G=>ko_BVtDn_(RF5 zWhblOxC$ReFZ{3cmIBB6hruZ%hBKGC52;aTl_LV9RgC0f*> z-;veh@g&o;-r)=L{-}Uqt(<7bS+sKrQktRqe=0o+I`<3|9d-B>vFlz?{#Ffiq4Qy~IJ_mFRtpw86UE6$eXbKS*ZjD!8r*98E`IMqTxe~qu9==1JNAH2E;c83 zS9NBU`x%Gdxupi_`Evcf&fvb3?`r_jgt1HBjzx4`X=CIaRi&zG?PezmF4Q{ z_8Wgo<0v8vOE&REpywr5R44{VHy+j}pI%h+*>Kv$W}Eg>8uM5EiJ~P$_bqDJY-z+t0eD3KKOI1h%G#F!-|QuCut}^yqvp*n30q zW_?PlxmO4D%rh!W*x={BN1GWg&Azh5=-}P=fr_kmPI>@?Nxv_L?BozJ^UWnOl?fN9 znk==Q6`3)ky$$cdG^O~Nl)UvGIn3$@esQ=Xp*YQ*|J4X7Jne2Xqju-aA#2o1*N?Ub z1eOR|6TSzYUD&jCccr`QW~uTb(Y|>cVY_sSXg|+LbcTtIVQ_=LD_2S1y$>f*cZ&%2d)7gNHi{X;4`y=y;-ibX-80+~j zdTz;LH;1(alKs+-jS9nUZL5r?cHA#tSn+|Yw%_O%v?Zy#dSR;OKQW$G1pNy* zsrEU(a5S-6n4`|QxtFGG>Ww)z_KPZAI1iQyE6yzJBa@MACoa@y2zq3>ttu#F^Mjw& zK3lynR*~bg!5`KZFedr>2gFS&gcZ}ssQr_=S~c{m-iQR-M=@~EX|s3Jfcc= 
zmS0W@>>LrM4a{HMKldE>ZaRqRl}a!fgiepOR~8YYX)@&6^7i?>zU_x2=7~xy{uvjk!st(v?$X=U=nmwuwEI7>fo% zGS}-prbAWib)_oNz$g0gTFup4Uv>n|etb%%G@W;IpWFRx>@X-)1{AW_It+5x{gOud zF&p+PeIH*sEgjfQyQKwFny{aN$xD!)`OvbEAUsjrsz!7SaE#273P@Y8fg1h{remQH z#ZFHIREtWEW0X-92v@UVNI7Y|(S~Wc4DFw+D&n7F7oeUp@|q}a%x zfUsBRzN$GF4ng9Hx&&o%<7_QdX5@&balIJ1LImf>--wPhfAdA_40fpGE26iF%n!c| zb!q`uP_SFjDl;S2(n1Di_C^@IO>ZKYf9w}&LgVLBcOSddjSuQ9bz`5Jn1@^gY~d}r z|D1+Q!I>tDzdf3nC$WUx`eZ4tBp;Q?V9`=9_t(WBro0ZRqI%wNW|8K+8|%Tb%6$d(>5S=04L`78;gj4DZAtN)qfpGv*jvM1#JR$mdf;pdsw45e zvA0hIQECx1JKVSOs6$_Wi4%s18QCz@Dqo*x`IAzrU1A3u=!w3p*KSpN;^ zMETyXWtK--rjne7l_b!T-qCAn3kzy};SnB}*vC`e26afeWeZ{y1SceXP8g0aZy@?< z%W{a5Dpf)isKSOEj7&m+?-f!16GJQ|(?y=C`erttLZ56L84I=jsXkfsx!l_IrbW8= z>vZjD^D!$_dHB@mH!F|xl#^!<0!waly~ni{-*I>=HVW;}MBS#0%O76~{~&iJ{-U-Y z^u#`{3KRKU-|)uCF%+=vUNP7QZ%3+^Yl0Dl+>)lzOSqb7i3AN(Nh)+y#t^r{bo`{? z1?-Bn>p#Xvpk}sdR)NZ=ZVuH4qdMl(5~I2TJ7uT#_FiA%@;f#%`I`59ptFgRcl6{9 zR9E!jVXHR>hnvkJ02y9(0nExohxBkpAQ^XcYxpGJ2Q(x^q--qQg+yBho_6!}j!rrC zu_i2TlmuJp@R7|S*(=VREn9P&I*Qv<_u9#eGQ2D{;q#>lVi=&yi!d$sfI#Vkj!}{t zMj%}QmF^Is3EC8K%27;&?Lm^sGmafDLHv6p`+Rc2dZ2Q{*ucx$_zBOLX2xlUEUg3= z`GxcD><@ccizIt+5&GgAwb<7+>IN#ViyXn>>9)sW{+2XmKgXq&IF{-@!_)j68j6YE zioSnRevZ$}O@6y0=5+9~CSAr*cyY?1@$PxD_I^Cqp8Pj1;xpn6LJH!|fTSu|wB4q- zm4;Vev&(!9Yk~u#if~;xX**{xGp9I{%!sxW&Giow51y*2Lk~h#rop(`bxV?W#atq#m#Im#-0Q3PKo#(WZ4fj18$o#chwNrvpe;K3C}^7Gqak3Kt4IMW zMnj+!%vwLqgf&ymTy?YkkyKea1QsIO46`ohOfJSs%@DY-E+g1LHP!qcde=zbQx9zU zIu*NzKpfx58L7;F288d?Hr9GuTM05flRNT4?|56~q4~V@3jT6b5<^iv((aj=_ zMuV(|fhH!Iu$M1|!t|p5y4dl=i%HRQfA+0YgL3LykqU#1WNzYUL^jzhz#Srx2UJ-g$Y-g-)Veiiz zf*{Sk3|shK<)?fS)jqRt80@3xjz_ z8$=zV(mn=VQTnzb-<=*S*WoH|(8h@<6{}%KT4sSZi-Yfv5tA%I?2W21ce!|OBR~2R z=lkq6*~4st00TaGQCS_AX}D75>EGCSi|^V^MCak;_Q@Jk%#oXE4NrL;ma2%p|D3QC z8@+|!*Vr{({-b%5mCxOsBSPCcZE3el6!7I|_8U04x8&?yRg2t!~| z*lC$r?$s>S&gxQeDN$||^1^;JZcc^FWY8pxrF*=4l#e`2k@#m`{RdX699)+A8*ZR&zf@n)gWa{WML_Vr*U39;-VjlK=6;DlxaeQQg$ zyCN(|=Pc$M$BI5x#dQ=BHl#`~4BY)wmWR12z0Xea7;AtOGmj;C;K4+PfogK 
zcXH+&WTKR@o67v)qEAk!8r(3UBW|g%Wy8QK8q%z^EN}L9${U6NKxgsU>slmB+4I-_ z)uXJNUw*}wBMHCeq4>Q}Sj#EH$G$0a#M@%-Ij*f;koEk0z>z;_MlS=mFR;?MjQxC^ z@HU1i&ihc;)t+=P5XU6DB{hE`oa|fN&Ys9K)m?xqp_f2HR(Z^KUhk*iZ>A^mY6|a5ZF}nD@@Sv_?1VGoNbs1= z@S6BqbA-j^3`<;SXIr`pbj&{od3XtWQpx!B$-Jy4S$?My` zDR9JNAKp<$0)Y;70#2NB;yY)5f`_iB#vG*D4=62R)5jD3x9`7q9@H26=#E~0xZK)Q z{w-6Zu(`l-XrSV8fy9?kCZ*4t_2>S>o3l7Av%4qmOLM>J3yORU!S1k5X4ZYrkkU5e*vR7D%tg^9LR_A#yJEmT2xrTa!)!bfmiX@MN>=hcB1rZuYZIWO|Woz|mynKPI=ywY4U zzu;UHFr`Rx1g<_BF0CguX~^N^QU?1VWDQP}sRZI`po&M#8or|e^uKY7_OQU$^#k0l zwBVYxI4_k+p07LlN)RY2WX{X%oP7#^k48Y|I&OI3?J0B+>FwFH)p2s8Jg8FFRyz3= zUyLs|{BN!l2$gN?I%bl%b01W6z12`qe#i!=Sw3uu#-UQ(!az3D)|sRYFSaWwoZ!KO zd4gx=6?zg&sv`dUG#DP?HuEyMHmn_y$?I`@*=8<2aP|y$n(jYyBOF2f*Z-VU6g1^) IWv$--FVK%qT>t<8 literal 0 HcmV?d00001 diff --git a/web/src/app/public/goalert-logo-scaled.webp b/web/src/app/public/goalert-logo-scaled.webp new file mode 100644 index 0000000000000000000000000000000000000000..0d28a2aca4f24a1121faafbbe40559be5beb6254 GIT binary patch literal 3726 zcmV;94sr2PNk&G74gdgGMM6+kP&iC_4gdfzqyP^P=1|bKjpU9$>+N0y5itR-M#kD^ zV6Tw=!j87>Z>iD#zmsx63(p2XW%x1l-{9~6-~Ywm4DM@Bp{alx(5uN6P({?I2&+&O zaOw(Rkf>D@ktu>Aa;$(s#tNYfNC-&MCTy3ZB_YF;b-{By|_EXAoaTQ z6FH6~Nov!8Y*FdR;d%iD004r`Y+JXPY}>YN+qUgL*-uQiZQIHdK)2d9*LEUPfplii z_ItLQZO@kXHuRDAid`hdQY=MYxR^$$gGD#TVm9$#f-*vtATmNWCH%MsPA4ZNa(xl8 zNImh?a5@krc%R*j5=g0ooCuYmoak40L%9NIff86WKn1cT$u@0k9NV^S+qP}nwr$(C zZTrl%j(x7*?;psmZClkA>%-ult++#`sTM%yQh*bZx=TnUwDI>M2M_=PP^@g*wrx(f zZQHi7*{;d9ZQFM5n*cl0|DT_o6|d+2sQ=&Jf6xD)N&ktlbE>$^q_%AvwVk|TR7T}w zt6-~B#?Dr;ZPm7o%BgL8t5e05wv4UPwr#UwX;f!Y;|kJdnBO*6=jHWWWyKAAdL=)aK|_v@PtFGV6hCbgl*j5 z6&uKcNHYMTj~TpS6yA^q4OpPd5SU94fC*}Gc?>bYH2~~ZACu6UjlzTw%J7bUaQ6i$ zGVz6c5Mq?mJh2VW>^5$rPFXPzOI%?3xqc9cFNQjXp@u-$MX-WP_Q_UJPn!M~!3s-^ z+0!fbGpEV%i7~s@R|M20&9W|*{* zJD1t*j^g`e%tvCvo^=+mc!{{0S#)C)Ea;voqps8J-wZ2uQ^s8Q2|Az2qeewN=`(p$ zNetLlnzDPU%jUTliYl2+zkjKU?mEJ zw1*!SF;omM?WCLGB25?OkxYtO zi#IorQ-xyegHg2rR$^(x>E>u+2$5Zvc3H@7ODUz;UENl0srm6j|41tihnh##7f`b6 
zkI)d6xiqqb{@VB(B9c>QR&O*!h$DR?M*N_>JTsQ!w#d+(l5?`w9Sr+W%^--||5d3r zx;c$DQVOje3qpk-mfdY4OWq~2+e})J$d+4E=6KlMj!F#J8JdOh(nxC#_7il%t@c~H)jzyqKbEFEZ+AIf zUyC4s+<4IYi6+K~xwFUE4Kn6C5eDmrwBU&OFkw#uFc>q!Gyf{z>=FxcVesdMORl&; zJEI=fxbBUPR|^CLC|6wtbPph46L}f5hft~lSO69S*tYO!YgvKJIHjUH0hqKc~`Pc;xu6@Zo|M7W!61uADVAKuA)Z z5(_wCM4CKa)ov%QVE|KN63-&SOeLaF_w81HS96dZdVA)He+M4>_20tB>o|7Q**`x# z{{O(S&*!E|JFYNDYlj9u4=OcaMMQiR_w99B1Tp{u2|RE?1dz`LoG9}0CZt6mMxBw2f5rzzb1yt#7@}ye%I=k8*tjiPn-XEm5v>QsGU~mul@63vaEsq zkO5KzN)anXkZgfWl}$(@S<0}APh#SsYqG$8C9~ur)ovE{YRxJ24Ma->%KvCSpTizs z!s@D=4M-#V!v+G0pR&y;(d2imvOFn;2p41_tVDjS4r}LyIOkU8B14>>AQIn=AW~6Y zo*xF`+9_=~D3?ab#jql}rB%R`iKUF1GH{9{QxudU!R-9`D#4Mt8g|(9yjspXl=ETo zjoHsky-J_*_IdnOQc40FgZ!Dh4&6{k#JQvC^jLJRoU>(S&jkT#&H=`BFuWM5-i_U5 zezKr;TG#CsfA(XwH!)_HHuGf6QU*<#T8b=ErkWP{q-A+2;?9=!FXd@o7z<&szxb>6_R#Qkr#utFf0M(`iG3>vysj`5Rqx8$1a9kYqHyQwpFor5ZeNsj3O zInKO6Pdzsx0>$Q)vC@d>dI0@s%ZM#h>SoR7=hNz-5fMLTUfK9`LKfhkH2{jhL?t}3 zM{NPDh{&63|DA*(Qiwt>4w(X&lhJx8VOfTdu)HGwN%T3lO+P6Bav3zR zI~@IYs>41Vvx&C5sO*80vybWX`-;W)(^~E3?9#E$4@*RxJetxsam3`O>Yt#`=Z8Al zO%T;nPd$*luFoKq*l)~8j-pOqI4F6LHHguv8^&BDCX`D+7%12D)rf8B)3-+0lP2%S z_S{smPa2SBoMkKq->cY}YdYQ-O_zf<=ZO9G+pl~b{bgVAl-g7zDbQ+^BC4)Y5BPM; z6@yQy;*kX`04j8y-tOCMv(4_~N&RwS>%SEj+Y;Z(5B;4wygx`D#<2n)tFxK#F8t2b~CL zHd8OFtgNh@R-GI8EI?>6|0@!;W-T%(`H&@c;mjaZ4>KjmrQ!8l5D z#fwz^<44u}ml$&b0Kl7bt5j4)y7eX!PpQPDCe`XqB5*bTHxS7Jh$`Kl6zp(F;{*PgIx2<_(QRH^LpRO=PthZ;*0lp41d~VMHZ6fN>Nn~ zT6b+k^*c3yq#}Vr0J(!0UH_eMP*FqALvI-VuH<^^i7P0(}F7GBfL> zniv$WKiOkn5dpzJnlSkWIbcL!q(1pn6PT*shwhH=>t1CM(fZb=&qp>Q&gENa$|$8W zM{KiDl|f+${ZU#7D^YCAi%z|X6#1Y-H41NzUus@VHPEfUh%*~oQn=TQ$4^Pt2kk}#`nAcf{bPR}cqFzz`|OYXn7H;oY@|*^M8(X94AAB; zN5}Jph=>hHo4xix%ckP0QEJmnTesb3U>=nj_;CaO(J0v`QL2UJu`S1?>KNq}_~SV8 zBEA)bc-xpsg4LJ*PJ^S|O7j!Nt+0`DtNgtXLD@kf0J*9eNYDIgKJ`3KIgl^wlcLQw*(>r%vjKL4pq>X$QCb5KqVS0ZFh&DlH5>|= z50iX8Oo}CNI09;ZfVsK=7(TeiGOU3ejc0>{Ih52z9K{&kKO+umGoXP|f}J|ZE9xS3 z>m;=dae1BCQHlXf(RBl_xIT;h62xB1?6%m!R?955?Rq_+xTlC6;D#>zkbsm6NS^X9 
zT)41^m;)`d-Kj&eW;==+lqGQJZ<(oXowNc9@M5uLnpf71sF*`x$;8peEt8NID>c(28{k#mBA4FPs1!)7wCtE}uBJ^A|{`kXJkbgSU%V sMQ-^-%lP2||Hvw!Rnr4*nVh}ID4=-NGCjyFq3G1X*Eae^6sx8O6ov0VI{*Lx literal 0 HcmV?d00001 diff --git a/web/src/app/public/goalert-logo-scaled@1.5.webp b/web/src/app/public/goalert-logo-scaled@1.5.webp new file mode 100644 index 0000000000000000000000000000000000000000..eccc1072dd2b78e1873b9c6f999bb14f49ddc3e2 GIT binary patch literal 6070 zcmV;n7fI++Nk&Gl7XScPMM6+kP&iDY7XSb+_rMkq>QKHPuWF9p|3Ai@Yn_ZZzb>}3%Q&lyv&-1F zvo9dFt#X%fMr_->0FBr-N|vjPZI?QTZQJH5V>>gpc>%jTh+Xb7PD-r{h)R2xv2EMQ zMcB5T(oSiov{@dkGAeDOTxD!qrENReWo+BVDq~-OZKt$t+qP4(JXmPjHqu}s&!0^C zKRp~rjU+j8^buUn<+!s<4gv!J1d~16wr$(CZQHhO+qP}nwrx8IZW}q0=-tbqcDkph zX2S>Q#{ciyQA${&d*6NTdiy`;N{jd2L*!BqkxyOBkP>k`)4KS;{PkQ@i zxTJ^37D3KXg9^aZu(LTu+)=(w>>rac)Zi4W3u1~Of;t!~aXB@(Vr&Il0jfa_&;Z$i z(6_+siwP;f$6Es~B__9}m_lqTw6G0O0HH<|&;p}U_KZrvrpzjSMW_Kv@Z>Z==%Egq zLn;XXz%r7$w!J&;nCZ-{ZQHhO+qP}n-m^J4+s5l&_|UO!+qAV+g=@^Y*80rMw(T!R z=*W?6Td}|24*yb^1^6kj0YE^KGC13|ZQHhO+qP}nw%@jG+x~GKxsjwuj-eCJ^1eVT zSRWQJ3!DYzA7MeVK;wo5&thYlvSL|PtZvqze}vV~YGLKGd|2`!5RoU#Ij$4`dC=>toq;zEcdN?z#?Id2CNm9u*M?SwF z8<_L~8foHs_!}#N^~L%;zx#(QlvM<*1s0RW0acHzz+#>i>2LDA$%^vNgh0*MUSxr? zycT{1mVs-7#EJ=6yDWCKp_&mUWLgDm<^zM#6sG({&|shl6rr?lu1i{7Cgtm`>QU3G zGDe7WWbr+)!UNXvGF-4^ietSTT&)5&Q_W=mnHx!JTe2ekb)e5C3R9w9gh&h5U9V-Q zO6p3JuQiF+j9mXsu{31y9Se?C8}y@hRxHFB1gwO{Srv0G%vO}YKUI3r68UD!4J5}~ zZxbrLMu&z2jExG7gsWvDN2nBqLggqD%153)P^7CB%0+r$FyYnoUP*5h$6M6aB;Rg! 
zs6Gl+q7aMmShTGDl2|5I)hda#nXNYCK|_JSkUiPSk{}9+-;|SLrut7{v4r4h8|bpc zv(t6QPp{b#{=|y%XLgX@@f>wG&|?)L(v~7L@=ZCVmpIxm(sK~cN^;{Vy;D3Zof{1r z39&ejsVN<<&s!B7A)TCEuKYYMYW=YXRjwR ztA+O?xZon;0#S%WNSs#nuw|A@pa(P6%ZQBP+ll8B6{OYJ4BBY}v@aj_yTJ6{r>?@R%k1uW*}WZkn)e`L4~t21C3tBE26 z0cD^aqAxRRB|5g5sb*vwiX^%Z@|~7CU!?nur!E#N-mdH9Z#h1ChiXwY?k!=Q0ZYy7 znOS@HV6j*b998UK{gAp)q=zkXiiO&eTo+Le3Rg}`OM|G@YUG-4A;-6N83kr08BjPEd$m0;|fH|V!<3hwgPhC zFN_A{$5{pdbpk+Kv6KT=?`@=m2du7IsWmmCk)CQqSwf&KP#Hid^r9Hb6Egj@s*Li6 zm*oqo=GkmhJYP?{KS>q0Ag1|a%=~V*#lD?4$&1yhIHfG)8_f>U7ol2|WTgV&LC}Xn zmw-#)lodY!9V|)U<0e+Qz?TE$#2Fv~m7wJjj{xl-+qWB*80&{MVVv!yKaAiT|DDzdoY#C;Ol`H_iQipY88|ZH3XBjvejEJZ0o+Qzn7V zuu=gipcwpxu2}&HZ&KU*k>?Sr{uc5=uA)?RsH{!fQB_4S!=#^DkH zFd#29lMa3@7hoSR02q{p2CxT+k{V#1jRSPX8&!|eR=mHJ?l%0ud^pt>pFTIe*HR8; z(Bhq^JllgK@OYZ7-+W~zzm>){n`+t7YZm!(QWHwNHqi0ZbJY@&r8IzBrUb4*4R8h8 zK;w;|>`$@^a0Xc81WBTX`*8fA5C|a8P`uw2)S^?)<#l!*Zpi9=V-8RJs za0o4VL{_nqk?lWfl^1w-*nWThbnyTG?eqI*<2+rgT%PUCUi<$2#XMaWN&ztZNdU7GRZD<)n%G}}z6~G*x*!F#fl)BNXix;4>((SUfFC?Lz!<0!wVx;k z|GEi?0!=e{&e=gOFk6LX8!zl8xpJ*xoEZv$>IQ;G2qr)_5c?fatW|>?VBvj0`FA$~ zaB);E8#x>wy)r}~{?UtLy}7B3h}fHx9*B$*rbK`nNmNs0nld;!B1Ibg%7Yyyg7g!bDagZT>`u=K&+?+c-#bhI=~b_@@kv)+v!!z zMHl!26k`OcmAQfygbT{}SHtp!VF0O$jNg}`J8Fj-9~GzY*Q0CXq-_!=Yt^BC+-ew^~0?Y?u9;7XLxAfRq~ ztDGgkP8CRtAD$v0@FD@yWzT)~>42>MfIB(1`?h!jxRJmX2;9I}L9^HgD>sBXfkKcB zu;YmVl>t|lqi30O9-aT|TZro+w~Oem+}>iv_?JMRV+2qAYMBVINB}qrfKkBWfE_Nb ztKJ|KIsu?W0ic#Cw&rBbEjUJ7wmC-!2nihn0R~l5O#rsl$hG$fhGT+r(P028Re%Z> z9htz$3m^|(9XkcW+f>cUD>m|Ofv98z9E@-Spkg`z(d`e#Rj3@9JS$z#EESF@n%b44 zlHMynb@bC@rWv8YinXr7TaI_+Mv~*NpROVVibnvT7XZ2vJT1*s|2NpP(o+;6cY)=E z4)Ayf!`|j=2^e}rKU8(NQ*@*mQw!jMNwMYu6T?1ay)gpX_d)0=Pg2}Hz5(K1%Ti?KiHA}?J>R3DMDpz0bnH@0x%kO8QKD#niW;Q zOh=azDg(4uAU1147)<-bEhvyTfc8Ai!)MK!XDut-|6eW*eTA7@$nm z%N+0G&mRsn00Wg_uA6iUp57{+tuzO^sOPms;M0z;{sua035jfsF-CB-HdFk+xX(A^ zL8uxTqOKq}EnqhQophT+%pj_5ZY{*__9d2s+yW44kSJofjy{1IQ7iCq2W-Vw1xmH6 znn>-|Ar96(vjFY2OAbPn$lx1sDBqI=z6>l>0HE@V;L2HX{G_?r@s&fY8959L@pJ%PxxzWVi^in&&*$$QA(!=)n8I}8i 
ziF|h|;+3UUt4e_(mnnukjAF>^+34l;ZsA>KqW_^-8b{?%6Suo0L(c|MxoFOxV!C*> z#7|#G&42>&hjWQJ2R6hU%zjL+Xg2$?R0nvQtedyKH1rgZX4);_NFEqPD0Wl6Gs$-v zUU9yeV&4pQe57)J^W}{K>mRz*w&YprAi+}<80^$4oI>o2Vrf$F4B04Q19OxB>?)$U z4q)iJ>11HpvBil>mU**yu`iY-Tg(-Rq#B#jO`>Mr48c^nRU{Ma-9Vo$W|Dt>&B!u9 zamO{EH&-fluqbDxh!K&ecZ%zz7%=(7$a1_>h*KS3eM4c&L?vW5)2QL9H8ldqae1@v zqlBLF+T5Prlm#}cWzYXRWg6u}k_%P?MTiinA84~0P`<7-4;xl_MlY~|9E)nek4Lvk zyoSk1fdQ98)kw>tl@y8$!Vc(_<@V*5c>D5u|Kv?3(0BXtbi`ga()BFGo-tbFj=Puw z3|0pQa2>_*lFQYG)Lj->WP&5|)P-UP=~qIcB`W~Pz=3d!jo9?*SGHdhU0ymcFimud zmS0;(=rN!!;o>P`5L%>eNuCIS8m@7w6a?i9|UoR=ocO22-mgBftuKKqzvT zXIM@HuaRyu!A#K-B@8b}b-Xm8TD6(TE+52a8(B%sM7e(g^e+A_PLQXQVYv7e-U#g8 z2rL0cEI@Y>>I01gfW=0&Aqnh{ni|FM++cEi^##xwMYSTt>TsQ}$xEY3%Dr#Z3;=h! z1*L{I4-t(*gWe}Sq+XUb7>J6#H|=z4$$|69lVKNwIj$Ilg&B9QEWsI*GsUFueM&lv z;3()@>gmD#u`C*lJg1Bt|K|kg<3Nv{1Wy%KckfFxmV|xnX|CPM`J9)8p_PIFd0tT5 zQZh6PaLT*B>eaa@kSagMGl@yucg47tdPC;?ns<_c77z(O|yHbR%A0QWgY-6K;>#XDHojw~R_ z?YzGqU_5lmpMZpVG{)7$dVdE?Ens!Bz%}L&65Z?|efQ0f*UCjn-UPrZR!kW;sh7*g zIZswx7>2VkPLl4>KD+Y+>wT+gllCNvsMr|on)C>v)w|Q1q&h&Z@xa|C)2$lSGxbV0 zn=eKzS=JwGhy@k9Pl3U)qq>h$j*6&vt7SxCy5R-LXF)h&`8KP z;hx%%S{#3QGbvmjPZH7k)1}IO5!hh|>n93Rg%w+$fN+&^E_%G*&dxvRd9fH8a1>f`FWx ztl3eri|gJTRoXXTZ+Wz|{8?&G0l;|fD*zi{1&nhQ@PFcM zd0{oe@(x%-EW{X`<=N@A5rT_T2OaT#!bL=GI0gE6xax6hxwagAur=8c{*T~j9OGDo ztj&OBt1*O-spr|~DZR{z%t~o^+;x(n2_i-e1vbB!Nsgq>m)vMF)2z&eNJQ#fIo>zo z1i_OfX0ZfWU*lv!Tc2FSI%F|x452W&fy&^+93Q=DMf<0~kT2~B{WGxOPju%E4sY@z?uis(&-`{Ew04zpPk)?|A8T z--MGuqZiOfAK8%_(a2CO8Ch!tA{}Eaj9kO`T4>QRwKVuMX1Hf7gSeGn+7LG4+KkH?Yu}T*n zlm*GsW=#eG>w?wXtFyD1Si~$me@LW(TgVPhVSC{ewkLKW+gOnXW{xIC z;d>#w_Dih6L8yD?z%pcYuomI*o!a9gF_9KnGNMm;ftnJN48Z!lL3B-Zw zAXqF@WEI_kwc8=qt$&1d^S<{5k;XzH+hi_S1M5(aVF8xF!v4|z{Ua=@AF)VTF+rr! 
zRM?(Ku6!~q_&h<5W_;1_2jhfR8B4;g>#01G-)0dU!{{cCgUf&8> wZKG8`P7(JFm7|dxtbzrqzMMjLFe8njGy60xc#9>{h^v*VPk&hO7O?tu6knccc>n+a literal 0 HcmV?d00001 diff --git a/web/src/app/public/goalert-logo-scaled@2.png b/web/src/app/public/goalert-logo-scaled@2.png new file mode 100644 index 0000000000000000000000000000000000000000..3ac9e541fce526d0ffd88f3851cdc004d110801d GIT binary patch literal 14448 zcmZ{LWmH>D8!hfmaCdjN;t*VmySuv;DXzhaQ{3GO6f5q<-QBIY-MruZb${HIwQ`b^ zGiT=6GtcDAp1q@0m1R+p2$3KlAW-FiQtA*8kaFP9eh6^jcTVgAGVlS$T3ksS0-_-i z`OOp-{GHq!sICOQh86-M_&Wr|3;0yJGX+^G zh|m9CKfB6Oz-Qn+(D{Vos?d>(RA;+j6I7rEX!2A21qThs2=R^DEG zEB}(dYbGZdm8(-BAVh61g^~bz%=+pIDW5a|s(qm`p=N!zjsfBrFaYGQV&Zzi-?_06 zR=)r1aP%4r%-bS6n!4N=o!?m5NIiP@o3tNaZ+E;xa`34xhU4qnY43886bEU1)%qDdW_BLgJ&$LpF48;zjnmikSqX{Y14v3(5*W z)>7cEVpR-F8#eE^-v1DSQ-s2WkcHw3kqj{vBaZqv*v(Iz0F4G+9HM0+bS+p9@FCTK za)5Jqg6_f@uSC)y9lufC_SiPqw%Nw$JUh26%`$ z%+MPd{0JEdE%?ufa2e47(Wo;723FAu90md=h&jnmXGz7~;oxtORlzk;qd7x}#r-?<=;8Q=JU~TrVs<=!On%6W z7z73}I3K7d$ZPH1>Dpqfg7XI8VFPI*Y^`|2lJ2-GcHzD^`Ij&eXcu)7wBe9Ms3EA> zU~~v)8D`~R-0ikr?~wQv^i-x)5v)<-kbf9wH$+kBSOdSiP@iCT6Frtxmp@T=mTE-9?4PEP&y`{xK1&K# zqX&NIenvJ#^W=Z8mvCJVcdIZhRP1?i0Vu^Dr`c(DQ-W2-4mNwDez zxDhox_c!No1g#j#9HA-_@hkZgCBuZs?K^sf`xyVhWN@}uFl*QSA9&f~1AiIpH zk%bm4;%}RAIlz<)+{a`Xoa0F^vzs``BQC<0i!>Ldk!^#cl6aPrdNq;_6;`pt8!6Mr z93!`uw1zO2g=r*tLZS#^UhkU_e^Tkuox zoAk{I@;D~M*DNM5obvA!*W=e{UqjQD`WN1&K z>~}7x=b{4|meIeFfZcOp!Kc^6D5O_kuVQ+NhRU1mRr8b=jiXlk2>d}OM_HEWl}wd| zjK|v8<<7YnGM{uyqiX?smWyb>y(bUsl5a|+O3Q1Rf;#Wuk+yh+dJU2cwr63p|h?(ZD@`2<4je5@D zd*HSedhejWQJqg&*v{pO#@s0tncafoD18KeMvhv}Qtk6DY26LU#Y<69z$I5qY`{RJ zro1Qih$VKGt+vz}fX6XrnblEfBS#RwmI;76B32VVO`N6HX0&|D;-iv1tTpXsZXRKb zdTYCY<++N{U~7CU1WqbQi3EyAsNH-;DxA@71eEZun$f~^3!5E;YX%dG>qnKac!+C< zj+%3lNj;FQbYSiQw)4r~8%!C`DZH^&C#twz5l7M{4ZpFgv-kYks8s>m!Im%0dKo`W zYvG%$`B3wS8{)2i4u$4x*;(r~&_c86Wg@kAFDA<~pe5>da|)GgdIx3W2?^KeU^Ts2 zhpA``a59yYH`GryIb1K>K!e_VVuvpgxvoJ4ze>(V-7Y%PpdaIfh!bK)8|a;-W3F=I z73~AyIl0P+EMc93i-Ox=dwLMwynTueIGF(44KAyx&4RwB@<+M9a&gANjVrgRdzkWb 
zKjGpzwE5}yORZ_xa-L!@fRZY3&;(hnid`(W)!~6z2{SvW-+WHl(HO|coA4m;UW_Jk zU;SOSD1^=qrH%J#pk~+5F{@8T&3eaJZ5(a-i=}CxSC8gY(6d->coppgIriJGea1LQ*j8Z;N`#f;YgE{zir4LCn5?(rO~ar*x924oGY&BSAunwg!LGO_0Bai;Tx{=g9F((pB)?3V57Nk zy2m22UyX+pniOtIa+ie;w3p3%oPv^BY#!1Sx@Anoom*yLHJus&M798U<{ea;C;QDb z$SDaFU_uYD5U0!&=g!96;uu|j6P+H1z(kIGirGuaYNO$vpOH_r_8)4nToD#Q$tvB(K}8Q*lG1o;Y;o;IZ_LIrVWp<{I^eWqSl9+8kf7Tv$*A%| z@@owj9J{vk>CAk=&9Dng0sTqVO3#30^D+E(JdyX!68twajY!~^jadN0@SQ{Sw*dpF zTGnqi_=&=}+Cx?yDfQrjD)3UcXF=%Vk5*y<&07+oPwmk4k6#5-9v2p^t5y~5sbA`O z%~rNolqnW_SIZ9~hEUc-_)8?c4>f23!WevBWy5TR>9k1%U-)YEzj_G$s56dkt#t1} z*)4)OR&|;D9;iSsGJQ5PzQd2;10@I_BOAFzkMoNFRS_-7SV^_MF*!Isydgf~%3%3a zDM({GjM+-u&ctxww$rbFzInF>u}%ZP=a)`Gswl^Fij1Bl=P|W6-BsTdK+v+EVarrOYhj^)fl$6@B zsmYb-LE2(S(N8>7G}n)Mkyo$8$N%>G{-)fipLn4~xQhleORkQ^i81ENCe66Zfru5f zHD7Tg_>|k}*=0b>$D@krJEcs=W}R%2aEt!wTf9%_GwHECW3{a7AEn4tFRu{=nDyEsDs`n5Jj90urVpURVr7+ZjOU+Dr1MTLfHVGhMfk=( zOUv!kIC90k*J7N@F9^6nqO=Mo#Kffj{96RuUxSF>1v~O%fQf<>6POP&#GF zn_LJH{Pt3@Mmd0{h6qr+X=WzwvAesSr|)dShDB3Z+3NjPWjuQUHtdEeO=~>Nv4od( zTr5>Zt(IyMiMb$LBDd_;b-k;m(Iyn0YxM~i=Uk~Wcb1+7%<<6F!+A6%IG@JFYhaD3 zo*oB75&i{}vVzTJA`uv@Bm46QrM#~7$YC=al9f$|dqwMd`cC71crgVx7|w`)A5R$S zCC{qzow`=!X}O|^4q{MVpYfVr%GPbJKanhZ2Yb^14=BF9X4_lABwhSuuG_;S#JlS_ zHv8Q|$j_u`K5&0KrU>EPlW5K3QyG%`PdW-(A1uaOINuC%Gy^<5=UEMiTC7DKCJGB= zlt|3*^_;&>n5>l8ZtSn%>GQOq^9I}w6q6E3lh&xr>#gzx6fc+?VYM=huVy^G|LJ#v z&Pkg8T{5X^zrUIXnyro-&CNBuJT5+L(Cgp0KXgW_CEC4{(sxPis^InToRj}BpG7%E z!CyGII%m5zFu4PrXdO}DnwQv^KqXteDsMlI1iYuMT#x`IGR>twh|H7#d-xU|m+xK6 z)e;7z_pqB}QU~ESXfSq39WBrGz*A)≀@eFwuS|erjM*D$t4zE9|L~@m7{q|3;qO zo6szsd-m`qb0e$6a5VvPi2gij`zN2SPQlKk#;o08b2HLdxwyP8tL+27uYJeTi0 z!lCn;a%l}gRj09Nd^gRPZ@7}1en^=jljj|e&J!!YQL8!&Pf&3aRP3~5-${r>UZv(^ zpc@Demet2N0dk0^vVjvgBlPM>r}7J>*0x5%bKv!Ml9U%Ob$ltSaW8>x)`5?3&pUevQidXJhf5<>~?;8CF*Tq7_&!mXa_>_SVDPS`0)nrMK-e;A?X%yN4#Js z>?DyH4OXTba-)$>6wr|<%vtwDth8i1gc~DIC@{!8+o^S$3C`}eGQOEzR8D=rh3x1! 
z!*u-(KpfG`?hi4hx_kOI8bX(f)bM-HlJygJAhP8FLI@pt5@p{sEfI!7p)SUPM|q|s z@ZElh4UBmoo>-IF2>Ez+X<4TOJL)`sEa~h>MpbujN`)0`*GttKRYP`zws8cX1HBIN zV&T~t{(JkkqrL;Sn~KD5apOHtun!YCf0DRxO_hGwk3vxe45r2rRv~}G6rrW@Ehzr_ zK_Ik1YazWz85qf0<_yC*j!|qq0)KbjA0%h`tdc8wzl=Uy#6QKIpyha04f;sQ_Bo%CeOgfxE%p3E!A&fHenwQEzC0k=Fh}cp zwc*`hJxyxlYl)+vUHkJRs=Ykt3#OWhyk155!Lm=9QaAVRHt?|@5p81~>B-@`(xM42 z`lr_E9zh&;pipTddP9#uT5WTi+lXFX3xwX4Mq6qTQmX!Tp|PsVF?;`Zmm7IaiPf7< zYumZ$+)%ua(RU>xzDCO>Wf^5bK)nt!XsFM_WVywij!HJ6xTPgcQ7}G6X2h}7JVY{3 zI@j_LP029#7er+!qGKf)yslf9$;^&dWjHP2C8tK<~P4WlJ~xD+GHwt0-V_WhI2)IA)Xohxl*#JP*`N2o$$ed zLN!JI$N(({&Q34var`NfE0P?Sx9D~6EfHc~U!>@7?}+v&jMhN@^+>;}p^E`tiF>Kr zh@i1p6kSkJNF62sm8Gu;`2B@Liuvw_idyXM6Jxv*-x|VMHU{SW$M>l{VySCJOMZ3v z>*tU!xU%;vOHz$i&e`axi_I*CqdEsS&*R+#p}u#zZ-2~&<`t~9hoL8qJ;7Ql)e>sNuRh4t0G>}RnjP+cU8Hk-^ugSg?8cSB11^j?s69=nU-yP9S95`)=OsvYdBLmNg|YC@u>MC)FuxLO6#&|?oIJf!{xhj{Fvm)MSH`Ed2OqYdf@ zMPIZTWdI82dO)i|2)*0fJVk|d3-CKfSd=rS|ir zjrMc4isY0AcFNuh$&Of{%ywVUh5{i}lk>hANxTDYpaolc>TD|K9!>-*ws9#avZ8eR z(w?%tMtV(CMh$zC#EodY)p%3B(w53oc8ccaR{r7bgyQ<0Rc;|G0jJ9MeXB>zV}in> z^@_a7kHwgG7t67dy}>Du$fwU&`FDk{Sf!X3VM zFKNe%f>c_4kd^H^h!}DEEe&$yV~h&7DAZiDa63U5W575q?ua&NQ5)uen8{_Q07`8pm?S zj-f?ev#XXDcS2r^k+Nq?$l=H})JtAYOY5DQy`)@^n!q-KKCd806;aNkED$ITCPIiw z5%UdArb=BYc$|^`(pWz)c(*>{9`a4rDF+dNo-qNv86$P24cQ}ED$i@Vw1Hfp2~JSe z)l}F^$>blRay!9`wtcSEXVVnkQ@$jy0T8Ys6QxyewqFtu68!0AS*~R)YJ&vO^QnS6 zk*Y;Q0?Jly&6_|!ciVlZ42ga4+(^Fne3~su5dY$Ry9oG0LWt)tApf zzg*wnWWRZh&oi;_OUxt~#wKkk#gMA4XM;7CtT|v^ynxD*#iZA42vQYJ2`NKi-Z)%g zN#PvB@-T$y{nMCspr3hXtw?89(%2E6<=`FN&Q%sUjBOtLDV7&(Oqs!@7xh<(1GQZz z_J+8C!i0v;2}th^-8SGXKj6>6TkiO>5iNW9T$#jFrWFPm|MqAY!!M3&ghy`$n+ zUIAT5ePOwW?&6iISpCxK#ASv$+4D8g9BlEmx=uwHRX z>LShJB;1VYC^JzfyL5hl*}9&ezbhJ6Zfm;=RMv^18Iz6k*u%V}ST<)aZ=`A{YM$nj zGv5CZAQDDqb$ebiClVx&&6=22*Wq6=B`8QlQg$yiH7!b=S08Zr?_@<8>O0?4k;M21 zm)TWs6de;L<6$dQkyCnpVU?(E;HA=l`(|6SQIMJy-h4I*kxwyS*@hyo3#y4iuOloz zDX63K;6j&RPD8G0Rw3^mucMN1Wl5@VDbK#A!R-*|0-BefBauzS30QJFl`#gfmF)#Ft(c{O=m(tT~I2qVBy 
z6DdSE3Osd=#W|5b89(f%J6XxA=_tj2m`PUBZXW@^<|=u3)@O${^K=LQ!8IUJU*2P2 zWa4#28%((-EAVF-=A{k|w2Suq04#6dEm|DE5{5mIBOiDXQo98rW6dvztt+Df*$*hG zuYah(+kxbT6QS$uYnTKQ1f_l>pD48E8^hdiY(-kATA(XZ2$WqhUO#&ctOllEvSxFO zw=~*unowqH;Z8Ghr^khc+xqY<+Ioxrvzz~Tj@G-obYG3t|I2vvsD42KP#<{Q(F?09 zc64Z{jsHuN;fjo`5hK8ms9pGS9l3W{`NQl`k|pbt`&!@9m&zOei)e)pi`mND{ntf^ zvECkT2Y=a4(r3l^_Vd9EM@=k9<|Xubu@l>{Z2B!eT*>O#O%z?Pzh9jg1Gt7Uv?Us( zF7QAjH*>HFdvXKX{@CJtox&4R0lCI(ttkcXgwe&SIFxc9dLVQP#Ahm`04LI9p%*c> zDN6wZv7Poi0&0_6mIpMLODR%dBJ7p1{72+cwF%Kyxl2!DNCrC*2!4vI@5;90mc|&m z^?=#98aY(Dw#D%uG_>IQ6#ZIe47A}r9aVZl@or?|d{)X)$$WVVC=sf=?3YRJ_;(I z$cuK43v(v{pD#wF*Rb~tp^a(q^A22K2AR$r#H%O3+)m1}6NoT9EXhQyz+9A!VW+hK zh3V2~ppbDOW<3z;!I~zS$HgjSdVwNnsxsCh0q41ZGWG*9)38O`$-#!?0{HV#2MIlX zT9$c6Nu)UjHM#blgnGKgB-!WqjZq%nRr4w1B^eTPluackq+!;M7t6xUcy69&OBb@T z^r3>!VR-U!A+t(!bN9mXq6@!C*vNkQTz56B(E?XtfDxQbWDW~)6Sn$QA%h&_MXt|z z!UK=x73(0=q(D*5lFYotHxZ}9PZlSqmC}n9!fr(7alN#0t>2*~P4dr)kTrmzHV4PW zbPj?^0>q1V+TCxQu#bNAG_UPgTZRl(b;$_!D00KL=Y!h%BaI@lyOOJ)(v%}&kI16a z@qu)lC_-($9i@~rM{DxuRWfccu?Lu6fF?Ej>v4$GHK!Al`643x`Dd^6!A%GaId={4 z4ZrS`Nk>LQHWDU={IH+v%1)w!Hy8Tf-*B*wPkB+MRw4tlg6huxl|*yd%&5Btn8{>? 
z`clx0BS|(FL03JUG;!Qr7qx7Vefa|Bj*g=B`t32tai<$K^u27CPj&NZII3e;yErxY z{L_Wb0`6z_{aJ6wiQ6Xo2Dkw}i~LLpz%!$(hTSpE00llY`(?&4Q$Ghz_Jjqwpx41$}A z59v*&7gF)nYwnii!{hbmgY=6bHH)hC{#qb7my34J&N{Uxbw48%(W1dCEc@=4GQqij zZq*9^D0@Rwo4-#!?4b1!qM#rz6l;oQhi)wpEy`Gk;DSlrAuxF`W)f0CFi10rIc2}3 z0k7*M7wufFf0gX;{crD2dPPzK1EP4i!_w3;7VDMU?jO`88g38a0w+b7ee?rWnnna4 z0x_>ehw6ePhbYvdEf@+T*ac`C6IGa>dKBDPfD^}W?E&1B&&}b>;X3noV*&AnzqY5# zT+s6*bOt19Y)Y59fjww#J)EvG)ALyeCRh_5jP=E zxyuE*IBBG-rqT_)e^RXiwud!=CD)y2fq955RiFqH*ERn;hFEdJl&-2t^8lHJ&)Q9c z_R%JDZi9l*3iXxt)$ZNvo$f3_ih2tic9xp*mWeY}l)GEa)6s&oS#997961bX+);e*oAXEEG>n3WD)Vn+Vxu_5%0alOZ z0mUP*c*C7SS(CID!NX;o318>9P6O%uciU!uI9sHy{shX0xXDP@`!W1=b3I)^Kq;W)?9wi8a!Ccr` zn5B=B7ZUg;dNMaBxQU;(*K9h(O)PSQmOSZbj=J+U?c1qnN~x?0yo}=2Q7q0;ixI`j zIjR zx0QMjE(-Kc2a9}drahxycWPmaQhRpshfdSYdu4v71wUKz7tJX)1d~Y>DS&X~QRSeQ zegm2u`OqKgp4z-N>d7kz4w$B#@0RyhrlP!IGE?3iChCQ)I(2Iyz(np|N+4G@T^uCZ z4Gx*Zy(;R*AltzCA_{Z-pEhNsu7H^?bF%)^4&^wCbYc2|=EP=1 z@f5&Zoh-l6u3>48uPnx&;fz(n^_wLPGem+!a>t`~p(g8CXL%l-z1RsKD3v^xMrjG- zakZ3ljaVmE9SL2K*`STqPW#O*=`+oj$7=^9S7H>arzOQgdM!WSNPFt7buj_w)p$>( za{Bjo*#=XFtgd6@D+o*S#@;pdo1er|&eG+Mc-!`DL{y34%>GJaibcM7M8qHzx3g^J zY%^Y?#Tav=p2CfZI0cqRAkOpgXn=X*ZhE{0#{Q8MQrXl$*!!q^LnV-+>TmOH-eN6_ z7Q)MBjY7_^r0QRVHz7;5q-G)D76ekd+M%wN8q=zxhxZgSxk+J%n$}zN@Akn@d%vyi z`Nw}y9Jw504V+)C$ga2)@rx7)h!j+u(S=Yi|7hsm{a!pok1fzHDVuQiSL(PMU5-W| zhhdOPO+NsE5Xt0sT(LjEKGvV|+A7o|T|P2Ez;fmAej9KuR(S`&>TejKQHx$slifku zAxBRLIQlE;M)m4mj)WyAntF@1K5oiKX%J4DkVxM2qtY`|A1$fCUFDb$bRKXHJKeg2 zV?-A%MJfD`)@lXjEugHmjhpZuXPamR0;m;19Lekwq-J5nW z^080S0Ge%BF{PCz-cy^?!JS<1KY?nKk5mX)J}RW!I4=U*gm6S!E< zt1o$;fC-r{u(}{YD@G;^eZ6d(3t8Q6p5Iu_8qkjthj>d4pUqn3Mh?TN&`r@Tb1QvS z$#tSN_*i3>Y2CNn2&Q*IP_270%--(onkxaPeep^s+%4m&M2x2!q@G@h5i99fH~jy|4`NcKcqw4^}% zF|*yaME*u-Ixh{*2|>vU-lG;akWzp#LCc#;*i~KZGMV&t`guaFPJ8L`k)-71!AQ;- zTl(_)5ytqgKMZImAno_vI(HK&AzB3I2XFDbPJu3wWQaEwm=n4|yU<28Tfpt;L^X7T z{*!P2)5GG!wfM}uq+2*lQ7yn8WT`9#-i9&=r)WzAl+kNyIYV)#U3**RX(Y{zJo_dX zQq6JQ;k@9}dt9G7t|c`7*WC)B*=_6X!!r8`U0K@ujPQzXg^QDu2J2Lqy2kUx>qNqS 
zXxAqHChNLHAXygL&skavTn08Tv~Y#}u&8lujG0c-XdiFAG+!Gj#(fLXIiim^O-RO6 zuRGIX^8UG%cLvJ!tDnE{yX1k?lEfiWnaCjV%AwEN$l+gv1Y>ye1%fcql|Ad78Y`nz zQ6(95@6rGQLt!8!p2Cy>Nr>i)0M&Fyhls}mrX4~&^{rvg0$?ggfmWsBvaYhu`0kB# zGV!wZZc=Od;WJ>IQpdz~;@WPYQP}QTGIs|)K6C#9^&<7vp~Ie zly76Z#;CeT1iG$Hlk*L!MMdQV0}`L$%mm&dHj$~L$VGMb-y5P@Rs{l>*{7c-w)xLu zs?U@|{c;5O#@YeXP(G+n)(?s6zJuXL6nW6{A{(2^N!lxOrcO55br6rZy8kBo+D{#h zRJU`KC1Ol-)J#OCA--MBOg)fbZf~3$`b`)4VZJ*No#ENNj-*b&xt|vEUrkmOd3{{m zA421`_{=|=13K|>eJY0uZJr3XX{(sg!h8Jm!922#qPIQ2`L>G2FHRT*PTIT18Yh1 zIrL^Ld4xH065UE<6}wGMvv=y@=1gf4|BRp>VRvUMBuha-xo;oGr0!xUZIWE>dZ-3B zb%gG~u$}qGHs5))dR413najn+t%~;9$TjBXg(HjE$L4A8V_GFD+-+dY62U}C#O@rN z`w0SCRq}K&%8HW&>#l^#t1U46w8o>wxm;Gt2^f9fM&*cNG)A+6$2UBvL$ZOCZJUG~Kj; zU>1(cgcn9`XR4rCtQN9cCUa2sCAh3Y zm_kHj=hjj!8u%b@TsVS11re|1`PIK1OSb^gjas{Od$(t3a`tsEnW`A1m$FvEtOw#m6NAB~|Y~P?)2rO=5TwrnDN)RsTiIkcjAIg|w*W8s zZ%=c2m-WYQux^IBiOz6lTo=jMo^a@^(%vZjatZU0)>hU8<`|w3Apck0l<}RaYIH!C zb$i{^zsbE)2&85F1`qnh(=X?vAb0mR$Svtm?nm`agKM*DNx}@nL<8(`#%CAhA6*x~ zst7ixn+Qfg&>% zVJ|?R3e=+K+3t?^scedk*_#6Cc8l%ShMxsVtXc@!1NA(mn-$7HnFg;!O$AN}hC z`UzrpzZ)7cLi3N61;01xMoo|9NDBz1OC8XFb{F-83XGXSjUsDUucAQw{at75h+NV^ zbedPJaK#$#s_gc6z<{T!GGvmQFDW7|VATo^QC$f+y=^3Hn62I+3kp|x>c)$lgm5e3 zAFr`SpMaw*lu*8m8%;iIeLQ<oJU*cr` z0Di0;hv2E(Y&q)WXQWy8LkbHk*-O|@YRa6QGg;Vi+zqy4Tu-AI#mqO?dShp#2Yc>Y z;``9KwNRcdt=VpB?mKg65(ghPBXTBJX%HX5?hY}czxQ6M z*{6$~8h~>4Nf+;PZ#C@uK}VV(adW|0Gqw^rZbOsd#=qJ-3{SMVX)dar7c4mJl^oM$dB@7cNA z_%VWOxn#@KU3kExrG1agEDh4dz38)dZ_l58^d>vk_IfgUIP$*@GG_ma^UL2|nQ0RB z;%W~uB|BmrX7{Tw2XW*X9$|f2%jT6e2wXL!@H5&E=pfIkUWWj2G&Q`QaGz+ zr!kr<3UVn8OQEJwmUA)8m?Tr(Iye0q|6&}_3#Nu&xRq=S6nv~ZV?#nl^RB40{S0MLQhSI)03qVDAR zb3y#APKO#5Ptx#)3%W9DzuADg;GJ{Epnos&lu;$=sy|sA9;GY+Nt!WiO3ZAxj@!HK z44#X0dX&VV0FfHA>WKfMqk1Pfzj%&=@wxx}DLdEch7^=9InXz8jXwbCNluJmyd?Pb=iYy@FAPr^$Ds^D znj$oN+6=GR9PtK=KZypF9osFFreit!BlUQY)&1%MTYFvQ20;v-jp@3Wd;jS;abTr6 z_ubba*YV!Iz*QW;|J%%hv1Rbz%KEzA@E&iX!<>tzL_i>jq)sqc#I}g9_jSkM`yfX@ zOISc@8IJaO^!3Ui0-5uTk9Ozqvf@r_BT{1xDK2U5Y*B*T*oi!}_Y#r%*zJi`Vv%|l 
zFfUDqy8iAaJxY+bV#{_%UZZ3e^FOiJu~um7di7^#QuOOyFgoR(^HS>bN~2FnSGe;7 zIqFE$dN-%t^rw67w+W%u-%c!g;-Ng~1E?unG3afcJP=JI7{dNCw*?3R$+!_O9=w;U z=el6U+UMpj2Uy#t+k|WTm4mLtr!+!WYgguCLD(ByMxv8Iubr+F+du?2V3PL3$p&y@)g(Xbt&_^qcRh zHP6rY@gGC^Xb>LlVwsq!L=lNe+j@d$h%+9m-_AC2)oB9%HNhCS2 zhHGWjY#Wr{U|`P`KPAITK{r2H~PCb5&tF4`!!twl&!l zeGo~LFe$V8dLX-Y-l~uWdlj4Bnr^h=*O44{;~{By!n}jG=@M zCQ|dsfB)i#WaAdk>rXj7@-r8eGbAycP&EZFN;7*eyfVkGo(VIQi^;29_5aNnghrdY z2K7Zy?P;Mt^r_f%b}2P*);yJk4~vXf0+-|3G;5@?`ge{+C;~eXZEYK|2^e~sU0xe+ z6_CTVnlTufL}6!AuAr7M7?T84%@D)gF(l{kM0p0T3n~bBeC_nkX1)y6fR~O-^$H3m zR$D8{38)C|`q}5(!Y0zDV^JI}es=UuBKJx^$oL99vC=GjSOpK>VL#I58XZ0HupeCRD&!2BrzpHrJO^o$y1sICfny^w3P19z1qA zG^s-qmI@2i{vw^dZog(EQ1hnA4BOA#dmj6+=!~D^k)7Vt-n}*!3H!QUtD1&8?_2#xX4_#~V%vwh9-B5=^K>)*iABhT^2CL|!(4T&P*aOpnzn|g z;88F2$k3WA6VFjLxufWaYY()g-3&?!dPk{HXdsDp>M?Nj=)}+^v$Qey4_-^cvX`ZB zWW}S{n2I1zbCH2%1KUhS5^)@vAMyqhW*e5_9R}8x8-BlF^H#2qi{$IHLMFo2Q2-U@ z+It;Luqczmk_539oWd`_ZWdt_!@#kkUXM6Ki`f%PDd7=RB#=1eqRRqn$&O=&m67skJb7a=1f20 zyG7xJY)6k-@NPE^9~-AEcaz+@V_-WvTI62vp`C@MUjom^!6=Wt@qML=lu1^TbGJwZ&|rCYTTQ2JzRAVvcQ5!S-?Wx&qm1 z`z&k6P}}}9{m3b+T@!&8mmsdBs}<01^-_G-`1nB{;A|qMC{Tnjh9;Ggx>hg#PW4xb z9$Ap)e_=!CvH;1g1ytb!kZlRF6?Et~T-Lz7yuANJXAIYk-~c@YAqs=WcCE&@?ghRP zynw{B_BLZN_Md?5*WT;DudE&ajixVPe6@jm+QIG^GjR>xulWD>YIJb5&`VI?5I#O& zt{GxroL*Y;zyY|NL!U!+!ze-|NEL3`pEQ!@s?37x4A@8MG?vygu9JLAUV?U)xa#~L zOi#xE`I_EUy40gU(h@JqTm!tr*Ep~dr{7hTLZafYrz`1l<376lnKNX?UrJgZ$p3*S zKJuqVLEL`R?-=v=6o)eTc{5gF8y3EzGXR*`mg%t0c)jU zT8#`%zw8*P?kM$f1SAs+*Z=w)O%LtcF`6A(_K2_k4>O@;CU8?QOE$@i<5~aL@c9Y9 z2R3H#+{2qva5JIt(Rwx|>bsaRAa?cRzhE_(GXt0vXvxAjB1$6ji_?`wA?Z1BhQU5J zG~RS&A;{)GgosYl`tQ`AWYu|PXMM6+kP&iCv9{>O^NI@PD>R{Zqkt50dcfILu<2NEEfCc@x z<&+=rYlmYDYcKX2>J;PVtd2QljC5%l>#vGx67{nyIr zzy8iLtnJ3BZ6i)?8!>C!IK#T%!L8c1?ccDrZDZF8YR0J<)He69Zp@moo!T~T#H{V^ zVQt$+%(`(hyoa^TGpudfwv9cAS$7Ysp@LPPVG*acyK39*8P?o`ICbNkVQ_}E5!5z= z+K7gAJ9Hb=wr$&XR{=MYZC8;o8IXY%Ha;A;R8$+4>FBW>XcYIf0Dg8@K5 
z0DxeVZQHhO+qP}nwr$(CZQHiBV*>0(+y6?Ik^;5UdRq4p|L=#$KfgYGrh5cq#*5d=XzH%4*xT$F^hAQP=yFFvYSBOD*X7Odg(kI)P=YAD8Quy&4gk&~+^iyNUC>;+tp**3lnIJd1GkN!!l=Y3;4t!*dc+U|_f?rgiVZL^y-twA6NaoQ!% z?9c@03P(?YQAiX4MMW`Cj5#7jPmxnN6dVOy0u=Z!Q0i0^)lAJ(htw5yN8RU$)Gc*E z?NAd`1?5diP*@aDrYKy>h-#otsUPY;r=&ip4JwP0qTn+_5mWxuI`wO2)HiiWEm7T6 zHC03v^Hmm+Gfv23b6cgX)}>8Ffx&QocotCpwjQ6QFBcSijt-}cGrv=r8tXn9aI4IW=0KDqNCN^^J*xoLouTcD1{;%4i&q* zMt%Mag`rs|Mta2|zbJvJQp{AZ**8#{(e8i>{|TcWDTh1IjFV(e%1C%l!deN#EQ#s; zP=({QZ}^Q^l-+;Wa4BQWI5aUyxhCduYeJbkbhJkopfQO|_i=jTweBgdIFvYbV{hEG z1{3>5#8^;>8!@Id;uj5o6kVX%eq}26&V74K{C`w;9U$A2neOAtnNef!9OIp-X`2-l zGRPnxJs5MtoP6>tQV?dng|UHz7y`0>k>mvHXnS;p;=?iV_mTVc$o+ZLwjD4wCp7(R zs>h5G&$$xz0 z-#$t=M(+C~_xsUUR0vE-YDzJ6Z`2DV7xp*QwNc0Y)nJnHy!zA@UJR!qg=gb{FzX9} zoyt|K_T)h4*f+@P?Q7)2Zi}x-FtB9_V8MRUlj_kA8Ds2jl|_7Hl-{s!Dehuy&I& zow!_L9Eg!y5?o;H@LJPQzB*jx+z3hn0lbEg|Md7xP>}D$*d;d#mTEKV=Ndxxb8$v3 zj5Lgd9Ir98g|`pwVwS?w(V9@f1a6tY`Jgzz+I?+mEH2d@$Kl^CC*#A!v9qu%UmRi5bK*>roSw!d7?s!P zwWrjVQQ3_l1dO3===7wy@O&soYC6w-bfr5ZC=BiXGGZRJCKm$ZY@)v(YSpN13fGH@ zx80>wG?1hK_%5}kl)}r0aP+9LtTgAAQ9Kc5ohhFilLwF5rbAiE=SP_GmIUi$r_;0N z8LSTlGfsPXiX;FiE6n)%_<7}MnLK=$DC)}#vx@dek1F0D3+yJ39*5M|9`meUZTD9o zFw7==N^4d|1^WE|%Nx~q9eyO3he>a)Ey~eyZcr3D?O{%}_+V7dj{N(FMl94e943J< z2Y*jB|KZUdSNw`lc=J%HZabbvz~HJ0fke;{LWB~bLP!t{1gg#Mqff`Ex)1iCc>8Kp zhWe9aYT_Q(R~{SHDKo2E^cuqpP zg%K3wKS?q6sucmA7ov?MA>GIda)ewWSI8-{f%GG3h#o>Z!O}vI5H^H;-3k{r-LMJn z;lkE!-PUd07KSh&;N^=#rB;lZo0A8nHY*KffX+jrpZ=Y80Q2Tczo$5BfE! 
zB<0ViQkhwXL72sZnzPEN>+h|JrF?lD)0$d#SR?XSzcMrymY^g>P`cWy(lH`JNZ#5Z zzsSF5b?uORL=XWhCBy@HL7qh96&XfQf)JR&ZqTQH`lo;T2%iJNEnn+Ib=#rL&4aQu zyM_=;l8m_Er<;gtH}#rR-RW!Ncl_#kS6)rhc>>X4g6OJbW=Qd;Bmcq3Ba(|ymd+%R$>c;n5t$$a z78y$-a){uU76Q>x>?Z;erI9K(>V~5A5QjK6t49d3))J#8hXXkxiOD1;gBX8usEAAJ zw59G=DpA9J9z_VKP#LSCi7Js10U!=B0AdJn8M+i5>*R0C>2`EP7@+{C5Cu8{MtgI( z0Me429%6btDJrswC?i0nf=D9ou7u=NVj%~mRYCmk;WP@79)u=}QdvelQEVPx_7jv+ zjGoX|9w{u|#(qTzxDGw~pVn+WzZiI0=RWijK_Ct>5U+8}((|p+){f=zm6nG^>O@Hl zA^?>tBbo;0dUZNblte#~2;?J0MjjB0N@-M(jO2;zBShs1DNkZMgfkA6W;e5+86ig3 zBs?r%8p2?zMDDw@G%LXu*It-0^t|bnF$FBr^|YiuG0aTK=VP^4Ti!`u6%Yaz0RO6f z)_Zk1=$8TliNsY?^iM~0O9LT9c72e)H!e>|sf0Y28aX`tiWII^2Jb&fwt0NwV=+@@ z5O^95HusN0W*G#4h1mCPHXfhExWd#~WIB9?*4&ixYZn6GK_LK^=XppgfePY|0FrNY zwT6lO_$1_bVI_eWSA7!F^5JbT#H4h(QlJ0wV1@uN<*ZB0bdOo6g*4a{rV5@3v@dsC z>V@|hM{FD)c$tiB^il?5q#-q`=GKP+;Qp5Tb?EQa@c^AYL<+f2?hQivkzhm%5l2L> z1o1@LKi!p(Rs^mD5QvRS-pDP&(gs6Z=hnmuxgU?N#ATMn$)yN;KxwQ>xXov_cOq+D z>_r5DZQo+$ey2R%Vzz&(C|que9x2#X;IF(b*Za8DtK$JHbKilCC-kV68s*C~6? 
z@6P`J?HpfCKb>@1>y#P-(RLPbMCO~58@WR&5H6pG7-4hamLdGA?U9mCPbnqj21%c4tya4fDb;GVT0&+g zSu(_Eo0vP7OV_<>;rUqhD^nBZi9$@TJop#&1ug|V@-7fpknJFwKy>3E z^vJd2G&%$+;&czEQFsqWgMfL|k9K}36j??zItxOGZ%3Nnbo;rzQWY^O*4CXy3jBk0 zs&@^rITg0M2t_2iPU7B9^ionVvQRn}XzrJ^Z?f*d(zUU;_l}*8)P{L^18XyJZ7Ia+ zDuX`Jlwd`{ng*82;LvMBRTe)>;bRAjI#( zL;mAZB&4h}MSP_6#hX-=2_ojROTo2YK;VNAzEwM(+!keAU|wWW)?SkL~ZlYX-}<*xkb_tlC0yvC507gd{S7wK-Pd* z!RSFC3d9Cv3&=?jRg4cudK}1B+YBH;e{w0V9o@}7BHQ=jNXXmi_}xg8v`r77+RrPH z(yJF^fIFJxsUjx_cn!BROeqa>xFH)?=lTOA;zzpfy6aDus%zJE*Igq!NW&0I$C8lg z)u(0yy&u^F27Yyz>tbe+TUAbtQ^KV81g4YX*vnd279h_-I>H#fImk~rv_&xNK&F9M z$7r}12h#iD8Tf}CP4e0u4#KdrlSHr(9wf0zNXKNiOHHfRfM7nxSs=C22ydg|F&^VF{$he9L_i7a zP<`ET6Qz`+H|WeVDRjC+?(2hltK_~t*0skL6Z4sr$0g1QZ&k6;^j1oz4Jl zBZxkTG{!N^&oR`B#~3&27DBH_7$Z`Z#ZOcc`ikRl1KG(WsfSB!vYOab;eadFQ|Q)#1P}Cmgrab@x+d z&i#H=PL7|Bvv;|8%rbXQn41Qm{V+VDKx7~a3uSX-ahdq{cq}|mflOk2{f;`0#%Hs( zEQQWWK!TEdH0pULNRw216xRfi!XzWnDkAodrJYQyZ>R}d_7CdW*Qzj8L?r~t>ZrbD zg@B>)U9N39lsWzJbD=RFkB8Zx9JB1{SB2qtbLr-gsj%wFovr}u6Ho_uBwf~mh=C5& zS08SbUz2769b+K07vMk=yqba#n}Yg1mCoP@d~|U&Ax+eUkgc6G!d;clWpAbGa3fVV zEs2}gPoH-`9w8yg0XNZB%atW5B+SW=%DEw|d%#7OW!ZRh>BbP&{D;S5ppgx?`zrTQ zRvJr-iK_;hv3~?K9k?D&9ypwQ31}A#aj!!_&K;Xpykl59=_V%Wbvej|^avdtKau3q zKoF!&LYk#?He-x~Aa;_FwwZms=ei+N5;D|JFXRug*79Wubn&ZGRlGMuZ*|Hr%d%l> zQptUNu+BE;69w~^x{pI?dop6%bGQI0T}PZ7`E)7nx6I7zG_I$d z*_Qy#>$IimA0mYp;}c;DS#`%T@$cdCZy(LM3?J29hj71T&rVN(v6?ugg%V>D z{#VWpVR=NDqlhHWUX>egG~JCy!-0-hL~Op;ZwIT=()~KyMd=LgMnWQ!i~!c|{hi0t z?0AHgi}C}=dof1%+Fs8v!Lnw7lu#!7RjGQ^+t=UfXNP$Wp}ObTqth1dI?l3jb4Hof zzOfV^j!thHVNC_fgY{;|5wo0Z5`GhK=%t9WBOx{(o=8lFwoLkt19U{)pmWF^@`(s| zb*1IaZcU(-iF?u>Tj3_ndByn1#&^wzSvJ0FYf{;y9A6hZ?E$38gw;0|lv^pYtL{Ev z-G!zd1L3_7$D7xOefVJgy04Sw<~PP;k&v5n5fGKB79+iIlq0wgX?C4%BI5SW%*D-}hw?yA-R_~OGV@ZqzYv&hV|KU)#|GU-*mmx%-xNnE zA(a#K>Tna%`)^c1XRDNo+Vum(Kq&WC`kM|b7ZHK)toMtxR~-IJK&E8F!-koO!mkW3 zT*Jx(o>yjOH+keRQ9pn@;^;m)(n}-AK}c}&TO;H|TCWaX2nBL3Z5{$wa5;xjoEoAN z<7p018tf)%?A|LD#0OTvLGP`vI38QRJXWdj^pvCg@-#vjVq+sThv7i}kf`Ye9Km4( 
z^b;ABnil5uC^czDT8KELBJbk#B6RjQ(kM@+jR2YiXiIB)Vdq47daB(20YD2ul<_J@ z<4DNo(mQ&!zlRi{NT`&MNS{qQnqb^@ zZgZR4+z#eAGO>%;1)=>%fD`22*lr~B&hG)Pw} ztX@eb>JY{9Vo4e6)upBQbjWlV?OnPt&abaJ{JPY`b&)`_35e{J5rbhmQo4X$oYg~| z2*NzYyH#lwq zsYv1{?`tN~Dj_JU+F7M+z|e%Yr#-f;=rl;%{lz2{n7BvKHj$JKMa%YadUNwgDXrF) zArB+0r9eq9;#;J&w9&RlP*(0c7Ir_gOER)CuL1aQfh<9;G-sBt>cBMr;o;S%nS?&% z(4-KN?Qe&ndif7?wfrhbAo3@r^auaa=||2WIp2P9TDXIC(rR_D!HXKEl%^R&XEjwR zRsY~(Iz7kilJi!ESfn#?i2pLl@?n8o-}k~LW!1r46cnVAq)^-UzZh#R4)Xr$_u!{P zM2cMgbr*tw7JT{NB>7(u^st6&aQsNy7$5$^j=+cccr6kl*X-5DqEX6OvQT`D?`3_} zF_MdBbP%%6M16%~-9PQ;f@T-v?XP2ccM+gZgXGmBY|xxOTnpPH=Z?8)#JM>Tz&FIu zA-?cbBfS4ZXt7rA&5J^%`cckC{Zbm4vQWZI#HZTkLn_CQucU);wQa|@ps5C+y1!rC zE(ke13Is$Emk{meXy+I7FW=Caiim5H-+t?2oJu<>{S5*dLqGf{ZYQMLha1tNDZ<#v z=k z>HzO!wN`7jnh{G6)$N?yC~F?8bqy=EQ7T{0OV^qUYo{^E7*DfawdO(ua6Egc{X_<9 z&CSitbxS)Lm+G$^&BN3@-X0nMk4dWgAx79-Se<5=B1A~!Zf?+}NJkPxWf=8Hv3XER z6u>CoY(X(K5g-44&&sJ0W^wV9RGg%XlZTJ}l=)U(Qv{N_4@eaO`<1@V2pQU@6+>>2 z+ZB6`$7wv5&Qw*0_ybE?(vlwHWV}@0{TmKu&HA`aqkP=;LK@EGICfUu^*^wv0T$lI z%rs_Yvug$6>xYb`7{B)Fr%n=Z=~lQ z*X}E63=!cp|L(E4ZGF|z=`CJM_|b@27Q>vB5bz&ZVf|wq!nthcAw12OYmpI%Nj}Es zf5#BNT+8exO%lOsyR$>=Ov_;ag-7ifHA=x|1e(eHOvHmAFWoQLjMIPbu~0cZmctabJF_{nyj~-;Ij4~FG)yVDw#r|x z`@C(Jn3?sjot_9Vj=-==XiE8;Q9)U-1V(*6|KXusQaN_)^_5FDy?oUQN6JT^`$*RG zk#~En<47&Mew+zy!(EaSbx$#QQ8d)4QM(jr7S4j_!ADbJ=J~^SUlL{2C|6s5K%u^( zIQd6V8p@Z4cFTwN+b&w$OYQEFgJhNqstlP^W&)H z7p#_8)qMx7w;*R9_^9)5P}_8fsEQjc{w^p?`DgY|OjNSn3&rTClneD|)HsEc1q&o5 z!HtBzFt(YP2SI**o2a)SFCV!gy>xwb*8y@3niZ6hCUAWh6ZRuaDQ_$*CtviN^N(+j zEkS<%!Pre84wl3w<_U&Th*Y~7l~2KB!CFh9U-ZZ4Krx5r2G`|IZ#aKeTsN<*`vK4# zYe~`|#uCT|C?3YnFy&29m|sVjxX14%X#fcxP^30NPtX=eF$8FT*UG3LDvScpM&!?L z7bbn~bf@K{oBU+kU)I+it*PZ;lkf)G3=jhwNs@#(V{+INV@;ecablaSV<@F^FGhV) zK@>!GtR=~ogw8-K;5u#sS4`j%Tt|RblQf*T1d>c`lBZH*C}l;x8ud*jQb-xH){-E- zC2lQQ z4_+w>nL?*9a>T7#`h`$v6SSXbHMZ~a{{I7qQP&yBBbQFKSPMP39$PCPQi|dttc5x3 z!MNfM`|4aqDdDHE7G|k$#jZ{evbO&{2RPqVvzS>hzWO; literal 0 HcmV?d00001 diff --git a/web/src/app/public/goalert-logo.png 
b/web/src/app/public/goalert-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..44e8b640942d528743e0738c6fac3481b7054699 GIT binary patch literal 232146 zcmeEui96KY`~S!jEh6<4g(QQr%a*ZKL_;V$W6i#eeeBVyB5EdN87f<5Y?+LmNu`F6 zVeDIsZLAR@%kSv-xN}uC6h?&wcLwe%<#udT>=&^T592`ydd=0r+Kg zeF)^RHw3~gy@v^Wqi-3q0D&-QyBM1Knd)4D*?D@1+uD0xcMuQs@B;r1fheg4dfD2! zIryEt?%?EtR6aFZQFH2~i@owG6ImTe9WONpXP3*tJ`M)Kx`uYaZgvXxr&Lw;DFwm+ z0S^a1+mnGF?nqx)pz^6-bYbB0*4GlJPX2nu&rSK%g{=)wn(ACVspje9a8g!WPRve9 zM)IV*g1D5Nq>QYT=*hE^Qt}d#vJ#SVVvg5Z3KEiMCC;7|18<1=1|j`y1I3WO{97dcp`q^J zYv<$Q<>%swJh?^F_PVFPpYo|w5?h-{{AU*+p2Yu^5B%_dHRONaxXKp-a} zaP7Y4p9TYXxe5xl3fXY|y;qx4gM?K-4>v+iKtg~UC@9F$#RR&L!54g6OF|Nm=% zq}3@I3~8|Y81IoT1SwQv#m96_aw}HEK2!G0kI7}tVZ|zY_D_beDdM`{bj*I6D`5Ox z=!m$bMS6c#H8H!~jy$P|@1y1sel`izASae*aL8ok0b)m)(}#`ykY~x$6{wih!e+VH z6A;K`?=|XS{IHYLBzjE9Sm<*qFDsEp#!>Kh;*i|?YEDj`i1?M|vKq}>$O>G6akz9y zzfs>C+KT6D1-k+yuvfhoVzbF(pqvvtKcy0cN;?~(k^{5vgl!{b%FS#f^ts7k>Fe06 z$G(cbtCp%2HD@bolqx(CPck97`%fVubDgPI!w~*;Q=uXL=Kih*ucq(YHf*C4!RUS2 zKyC1i=e%@)?C$>jylTU+7v+wqeEbR1t_P4@M?gok`~Ve$_n;btKs!+yiy3Q(z1yfn z(eKkI2!s-4otq_Y@hQzl;g(@#!Opr|44mrEm?AE{e!WV}+LY=x#+K1kLVHEGEw?$) zZHR>_O7u*I1em1dp^Q!B@Al>601qhx>JVe}eJ;pk%MTxppp|K9>daMB!#~#698LfTbI=(|1#~lU{l_ z88QDs+#B4+VY5?)eJ1Odz`l0WG^6F1M5e+GcTVMqk=J$&#FoR5CE|-IEV8fp{$#fvD&Qc+k(Q_JC{uF)1oh^dM0)D!0(ZKs0m7UG;Or1y5^ zl$WhK$Ex=sPrTW0qXTq2c-|eCY7(|NrQ|!sy=^PVQp*FXY9baVf@d>MFibEBVVd4$ zdKhRZurdZIEF2!b7)JeOH@1CQAxo-)qN+)sepu-l`VupBnlXSOR|^EeRFr4Ijpwi$ zSaxu2r^5DSxzEL@s#t6vmDeP}a5H;J&%wGNZnG^}dg32sGZ(fqf?5-J_?_U((}r_~f^*FnuIvEP=&V;0E$; z;LCG^od?Au!ie7AArRJ2?eqZ?>sKmPRe}sV*0sm*CnX)@^>lTv%*n}ND8xM|Wyn=d z#`ETi=j0dkTVYlF*thS+UD}E-k(HGd&k=Wa^?>BU`cok7F&KP@ae=lN=eD;a7TpXM z=ex>sf&&)0s3t%bQ`g~f21uv@7v2{ig`dQgKnJ&G5&wQjL!I4iy`}j%Z@rP-aFX!g zqdhkMuKH!OkVEFkt?lt+!|vhg>J^%?dU0_!={OHW?LLr0ygBbicKM6d5A^S^ zeg92=>$442A#e3=md3|f!)7kvC+a}M!yftKxf{74Wu#Nzs&Q=JghUVUGQbbdXQ4jc z)e&QhI5O7AWRqft?=Vue!k!J>zkQ2bhvqaa7tT7b-?9(p$6lE4!L1CjVE{?fsg=3f 
z`ZDRj_7(9Bn53i=@`g3-ZL1Jd6kus#p^wUp$K!eNth|cbvkjy@?;T!VrdUO#vuy)n zDhJ*dakKNk01_^RzUKPw(}nFD6rPupkyRDfvG2R94g2K0 zIJOzDuqb9?_FiApt5hqj8IPhD$8ev1Rn;oMqi+#5mh9ekGwQuIUGDiCL5sXN10Z`f z(*0p^7>XQbUypTN9uzASzyGUHc#-4` z3*1+)et+{8VQR)E^PwyM)dp_$N^$yX_fCT4UKWdNIH#s20?x@WGMf9t!=Dz5qx1lL zB$MY&S)G4u!#F=gPrx~`^}n63s5tXwDF+zk2$f1*--)~SJNPcXaSbsT7*f#jE_tUJ zbNy0iIWR2ct)flip?}1!Nx8Oueeq$7n(5IBd`-T}we=u_(rKJ)^A7ePvDXb2zh^z~ z@x_ms^d#N#%njo(QALE^_tI7Qkw5jViK;uJ~0(qULG=jYMbgJ z)&l|;rvMh(?=&|4BI?}13VeW(XAbgMu-yTsnRZ<+c-h%d1LZsPW@VAJPmrv$~~%_vC8)Ln8FH9lpedyA9N&?vZTev4XU0)h{x|%GIu(>2B5-L1{g%q-fN1;v;mgn; zwb(>g)MWeWIh`Y3AfBUQYN@aT5s@x-U-bY@KviWhve;$(RDAcfbyI_E#-mBU6kT^q z@%3sm)os3-c)S-8_NKgabvb{)UZa9nR^~isQLI!ofHczbkS8j4-}^@_ z)2m#dtZin(9OvRM1!(%ifwA6EEq4}(!q}KsO>X%oG z<1}~l8R(RNV?WyMZ3Wca)}nUwvodwoWEQVt(I>w{%@ALdm%-0rO&sqlPjQJragN+*Up!VPFa7}VfQ$1Sp4iUU4n5<03%PaiH_}&ATh!WL zOUSPu+jR7A-HDZRJrVfuYgNSH`9ZPCu1so8ZnaDxGk4+t3?gDqK)!sp`ffnA78oQR zLdnNHub39FY4@nJ4_hMXsCS_pi8V+cH|0+OKDUIZpg3~m`ENP7N0rEdFkzLlN-qQ zvZXAaC4XM&@KarmjkJ(`$=YhPm^ZQriRGzL@2Prlf)y;eYej2xx7+oz;BN)Pk4 zx;q#8{mke>1$idraXe>^E@fCp1*0Vdg6t85tDn)}nQHeeu(TqmkdT5nxd$!!x7hXp zMZ^r^G9k~~0*#@#BijUSYmg3OLeDfFW%;rjXviw*@_a4S1X`xu_4I{Y8chctb7lTM z6sU4LN{ZMh6K))>djj7uNfKjS`h3BKXk+!vk&DT67eDfR0e1>*7@nGtVOi>6>Y+7X zBxBtmBv@AMyuD~ap8AHNg)+N_0$q|kSdlfTB%Nn^P=ySy{VcM8jzkKEwjX}tD$M?J zp58nMdlTI->nm*?Z5 zKaV1hX-rab+fJm=3NY@clXs9|bW=-u6jPDWHDzk@#_`;AG7Syo$n1(?Kr#bvO1Kkg z((oGvexDAm+M-qA()}n~w2s5hCd=7fm(bgEY zZ3tSRUG&ncL9*-&IZaSOl)QPRsA?yf=;&z|Ib~Muc_9~~N~dx|t2@~u#Sy{u;|Oq@ z5c(&Xh8zO$nonZz4JGnh_=a3v&-PsK+=M6;bSWt>R#lI%{&pr&hal!_4?@ZwIE=DX z0xb!;e46Jhln4!iqt(!rgY5%5^jcLS&^Y>$&qeOMd-zmpNJiJQL=m1{x9jh7d4-5- z&o9IKQ|ukS#@kA!(Tw1L*%T;O{pfZ+NS)n}9T{m~WRg!VIQZVHt(QSaho?D)F#R|N zIO+2rgPI5inSYJ3Eqq2xYt1FA@RMU3c2I$otiqFY9`jS&@3d`EX+>Ma(d4xvNhhVi z_#*~`2^A-=Ye%x&j1^Q6B$vUfElaltHKf#IK#Rcc6N--IKptcID6ry=W=>$edBOJm zfgk>(2Duj_zUtFni@Ry8tgBon_y;> zV3tY?7YK4dKPWoGIGqIze@c4=S%$UGM(ST>!qT zw*W0CY#Qw$G(eQ+?DOh4S~QeR&nN8bjspF>Bwvpp>l2(b&3>bW2V;>+W7Cd2UjyVp 
z+n*&*Vk(!~V^@L@Xe9|UJG|d$r(VYk0z?5` z8&RD~+a{IEs)Qrt12RbCAzVFHa=jV}O!4nW`sOo}WEpScFJC%Nm* zuR)h}eS&r>z(&$C)iIqG;X3E06?wjd)BAQyKmyc6NTJ&6<8#w1^El|T)|P-f@`Lm0Wi&sg@@(Vi-bSV- zs*9Yr&G<2}8Z`h-)ACuS@G%Ze!cRP<9fK2(?vz}k2Z(ot5H!CCbT>2uR?i?{f5n3w z6`c=qcB1`eEzfNmK5_%PSE%uj$oAdO*H~a3f6_cD?k}P%)?3|&Q_M>lz-?MFvQt zmJV`0fGm1{;QlgNFQZF6C@ksGQ*XIMN0G6WOf?(5E!|xc2r@!*^sat8_tO?V0Qb3p z&f6KpkQ?po(D28O{3CU>VwpxRa=unP6$&iJfae_-L|2|6^Cqw!gLvxr*rVCvS$BBN zk0UD2XjU$A26cVA;Tr)-VmzGtfY1C8)0OVxf&6(;nN9~ay7ot%e74Aax&)OeADzJZl;9l#m z#3_}jwr>@MK8&__X8+o2$B^^9yl7UJp#mSks0b~XAo*jf`!wl1sPFH_9?LKymqmH> zzaTK&SXwuQZ@Z^Jl)XIAKLvxd0f#e*w8JiP0XBocQeSc4 z6!IiHl03@&E;9`1`dmbJ{9$9xlLc@>3fkU(;_)tV z?FHyc*B|uEI8K0rf$S zg=oGZ+K6g6XBOv}KE{x##%T>$p5smim1IebhvHNMRS+ZZ$F%(+p6*@1Iub_S$Z&}d z+o#DOlt0F^+A4u$^wuX0%Bw|~SJ2JZ^RYlTXhrU@Fw2VuihHSrH7ujqSYX8~Y5K&t z2vGF7nYdB14G>r*#033QXhmb<;P#~vd`pjH5aP4KfVZEST{C3?69?;}K>B5g6e?39 z`+>FQSLVmyC$RRuJ8MtGwL*o)tcJAguE1j)t#Fkj#|Gxr+WTBj14@8PO9Op5+o3fD zf-a!TcLoIMbWW>#g_1Bit5$4X2IJVU<(Hgn8okeR-%&za2WtE-^M)ys88MXwe%ntz zA*4Ma*686@_v+}Vbxf#CBa!X7|)IrAjwpn$z=LS1HC!B84@qGF)Q*s>$f!5_O~?Doy0 zL=+*~_j{O-kU!yQ1N52!x(tXi&jnXWp}mJr?kkae8C`l|Gf!*28XB3vI)QsZW2 zi5U-6`mJZQIR0kBh_`Q~$spyV(JsMpMG9fw&CmpaAZPTWy9mw;!*i-FrlrW$`;@&? 
zsYhIxdQVuuGW3W73KHa+Un%)Uf4VU;f2rGzE^qFl3#69IOx~prdn67p&vDj zRd^}?wjrc^_A@BvMMZv>(=49Qp13S|ae8SM{M4Gb)93i!L!Hx`Yu_|<_VQL1R#~~0 zB$VI$Ypdf<`Q6vV(%c7R$p!`^v1rlRmTUg>)n*iRe?jPEqX=RWtF(|h6Z-;wzMuNOhB@2S{gk!{H-3s-HNw#(Mza(p zB>J|i!R#`DZ$XvfULRc~+$i_uwsxdsFNg_GQ0I8ySz3|bO@)B_R_1O+cyzLFmZwXj zZommkvE;FMebIN7*FjSC~AEH zrf4=HGb32WN6>^`K}TtWd`T)z#LjScDlAASrn!v=TQz0cDPx zF~i~NHAmZLEG)}^Z)hW|=c;A6q?3Wbou%$fEsQ|HA;ww9WVsfN8vWtAx;{o*9?Z!> z_&IH%BHtGjM0H))#3{Y<)8jC5Dem7aT+;dV#=m*J@z%(N=y*dPNXn{B;CWNzx98ml zz^Y~(X!SqFu}-|66115j#L7U9AY#&BL{Dq^`=|Ae{Ql*ynf(8z z3UZ!Knx4<~r0K+KD2t>6FK~eDJ3#j1U3k-Jo)$Z6z4_-*YtW1XqBh9jvG8Oo!fZAk zS1w9^NKvjbG@@pLSeLrCT?IYgi=#g-eLm(wykoNk(KACXqMbmDy_SHk5iT^eInXN>No@ zaa3OjVyiJSnXXUBjz32uJ`NMNQZ36A*>-mMonqy3uU8iobTt!bP#}7R!Z&>VnMSX9 zCZo-sDpLLjNZ;v3m#`fuPsy9SyKD^%5SC5*otb(?w3jHn;cSC)q>!;WG!J={k4dU_JaKi(_>2<^Kp*4?x=zUMj&<7rmn zW76^Rw>=@_#Yf=%SQ;2D1ZQAqZBSkjp0o5AtK~S#-ikaU806mjo1ph@;qj_COP-M0 zx@cl4U$NZY-s?BE+sG(PA&k=tVJzh2?lCgku1B*CJK?3BB!67D|Mi;^F5HKu3w=4i zXa47U#O$uZGMwM#*eil%GGsmg?YK312Rjf?K;oad3d_(`c|v1{6XQVN5OK|S=E_)v z>xZ_@G~wpiMKdGpT6>JKRrQTiv0d#=^X2r#CpB%10pj1L?-@_t>^rk)ybg@bck{ag3JvS}B+{^k5=->iTc>1)OPX|y|5X~oUgecIbmy1gyOO9O&tWC!ZQ zuZYe}ynD3mG674$BNw8FN)qi^tyqQ>3fxf=bN)B?9<3Uh&994q3fr0;?n% zUnc))HvoTGUICyj3$FkH5Xw*X6f(2kDsdVgy4>VTgm5F_21Y@S1%c+9_^h+Xa3UF9q4=!zrA2_gzz0$ph=6+*cmSmGJDT4#vX z#(aLtI&qU1svtlv|CJPlo3M!k;V+BV+$O$7^r&M~UKhu4cs%3&0l<)1g{i}(syq7) z6$;&?{hCohVN0l#( z8{@bz-0cm%tP_SoZ|^-&azrQ|u2Oo{F!KiAy};Z-t|>)D1?k$7$s3Mm;-O+)^h=lKMh3=bC{6rLV}vqu}Z-Rk=V}m6ZJ=y z67SOPk(F=!xL|o@8~I$%tS(a7DKf5BtZ(T%H0{bfk;3=AL$aTdwaRB1>bbQ^4>~24 zw(w65_}u6>L#cniOc`bHNH1rR9WiKKjMqW1hWG+>H)Y#OFu=Oz+f zWqT^Xs&j$;&4t2X?=!LaGc9?nztd-`1Ct-`QSC^-u4)&#XybU+7vX|h%%q4w4|GUe z%y5^4 z`w=7oC&Wp+ZiqJ$+4-C9a}B#+LYD}dnZYs|`Z2Uy6i{6yKR02!LRtAiWsxlMC zxCw;%DETY2`%TW(N@=D;!@R{lzSOvFZ95ZnHyF2Rkn5odSzYI@-+44@IaC;8^@R3` z!fuJuLHnXbx27Tywz4Itu`41NYwfs?%k*dm|M{QEJwbI@52h?*XLQX%9;Ed=Jkq~y zV8rRY9}v$6u-ZX0V~cv(u_SMQ4fKMxre9YHTInnqI4NK~chqrIPZ#fRhgJ*muj9iC zOLR*BwA^$-5N05 
zGm$HcZC6+1FwRM(f-BmvmA>gbC6t@uv=z@;o1+Ooek?T^_w7(eUpR7n3ubv~%dZ=t z0p>$a94v1J8qeA~^uhz@YG^-6*FB+$#^m1e>8p;BmP6 ze5fKOc;R|crg^Ss4uYEHdc(U%fGeRnVS^ho=+u}uF{Rv7lyPl?JHL)0pv> zFmezaXnR+57NOlQUWZ7(t#70$pm2t)hRdz&(c9FJ&ZSJ@d)q&0#TldD;70YbyAzWq zrnG!4^}}(x`MTJGcBAT_@%{$MAuDANl1CyoRka9*(-WZfSHXtoR)qC>dYf)U4a;99 zwgo)to<^B$yW7Wkyd$HlZ6%?1>DdR#V4I}rbXAew3ki?v!=KyDmQV0&39;Nh3=UT8 zpBh6_lW89^`6F7sb*`g`fXVkOS{g89M{F8o*DM&}q>gc}lK`ies`oYH4ZapGAzPSDyC7tlVv{L0VV^NNq#8 zU*}Sw9PvO+ccI`YALsY!xngjiiI&im96yr3_I^)6u_szWuuS`F46+#FWK;mh= ziD`y;!8H}_MvFAzAV)NrqEmvKr1j-Y%1a3HZjL2qA%%JVA~~OP3Z7K2NDB5v6MzfT z@6ue7d&-=gEXI0{n@Au&t4>vn@gq43lD}8~o|H&|xc`hF#8;8%bWATeBYJ$G^?na< z>T!&T!yBgFeBJpF0HZ|TcDa@0Ih!1IDk)n1kmK`_!Qyfbj`rP`hz{4ipei}kGomi51Tnaz(j z7daFwd_Dqrna@>p2Nax?UJQ7n#eAMOs$t4+?2a*xH&c@lnFmww zbrac~t=r3pEa^5DbMaji>Z5OC=X9e`kYQ3I_L@{OEtkUX7s)QK(YFD&GJ(B}lFK@l zi7GYgW#S(8O^ zI%u<%N^tX@aKT{jI@||Z7gvu$7(#Kh=Z0$orQiR@wS=?3^TBB~NpYWX2d%@U0v(_8 z6MN!84o;up1;`_29Q{E?B*|JFC?tW;1R`{Jt&P=G7I5gJhPg~NFcs=7v1g?`%BhGm}m zVTG1DGtZI}H!FZi6ccoJJx}Q>R-BdTY6`&7N zMuul=Z<~{_)HI;ijKF3vS@KFIzv*#)WHe3AO6fRfOkls&9cp`YSBBDzYTbLlNi@3ercH z`|XkBcFZ1|9nf;KL`!Rxu-s2a(;q%)pKl~cW3$r$kS(BGfP8P72=U0-8Z`k%&%vot z(pzBe>%9luva_J|7j*$0y(? 
zOkppz&dlu;JHH zCaoAhTjkSrBximNxI+Ds@y1&GgDVnC_Pof9DXk)Jms!}_E5&UEA^I^mZ4Ux{^91@Z z#a&S9PIRf1>r4{uE{IWkzOAbRVCc*lI@UDRi?_^MGt^hk_G^%Qh2D=t-H?%Q4QdAM z7&0>iVp`r+ybyrQT(5!(Ee@?3`1@>Rp;F&uVjoy>OWu-uLl;o4&d2cvocTR(MYPN} z%JeAh2f&YZ*M*4TJgd>mDKHJ?5HPz2%h299Oa7^?3Aycwnr>1ormX7f_Z9dx91OO{ zg@z%sd>27X&QYYm9fGcjCVrgpVjonxs&r#xKiS&j*{8jVRd=eD+lv$AhKQx5lkwu2 zzdM0saDSR(y3^m_Ot?;lON)g);JLJx_+_7qz%hfY(HtNOm;DqwXH*&nPJX=;5NpVXHIpZRKB42{}dJEaCaS>Yw{9%%$meoGMTBgMo0Sp zOQYbUBt;I~e_F9e=hTX67n`6vwoV`f`f|0yIlb-vR#G6J0sc+fgwJE~s?h1cw+WbD?nM)6~D{ATxnhf>FH7yG< zO*nm`-!O_R0BK33{F%~-*rZ^MLHQn=-d{swBWh2G|GJu@QEGJ#RyImIq@RD~E2ZBX z^a{W6OPX1APnu2zCx;Wa3@Glx>jo1CZzu<3#@?UIiam|JT8{7D-=U4_V!|Hea}&hYG^|_ zIT=hHqm!jgrk%x-U2BT8#K)<}ggiV)D#f-Mk_f2<+C{SMP>3d^{Y-Vv7RSBvDzHT!$1lQXuoN{pNcUKn(P&m`FUP8_uH0yCn4 z8UXzk>8qNvxjw5z6l;&{In@(>6qa&Jg1c0Q7+1=L5?Ihg?e(#O{@{iB=}ZKqZVRH! z%C;o^Fh$#eszQb-cxo#OO*=0R&D*(z;hT61`ijRmUis2K$m`rF4!QI@qtQ3R(BO93 z36N6;r;k1I$lRKihskI0BeMm90zhzg#?wln?-sC(;}u}~tcJs_8VAqyhIwVz_qH3) z^dFo0nzYikGGvI=6Se-#kwq7Mx_QRY1g+XFRpZqHFk$tbxZ{u;ab*)tefrc6aIOP# zPSRc4M6#gIIONf@m|GHh;-i^A#imwvALZz<2wf;T&?oYH(A!)A*YJ{{BA^j?J8|sM zqs$%$FhgVt$=iLSEDXICwv~c3SV|dcYNwK#a5!A`GjX=kyjPg5xn6;1BIni&B2g>O zZVVALn)RS^b@V|BAv2D#UN>6@8q_zRv!n-lp-nb$*R&}V{CuN**tz;$M`7ezDD}}p}T=XqJ!)@e}^9$zN=ZO_C79R zkNWw80bOo0Gx@6<8_tI6rl_onY@Cx-bL7Ia57>>+fmTb)0eYAcef|AK%w@_USvhmX zo1|i_uvbsP(ZB`A{k^cSqgw70--7f9+Otlhq41r5%&poPosROn)alnIxWNTRuypPB zar{3ycx+!|*;20g-Ix0z;}zP%2aW64>qe)#)>BFS(7za)v#@)BJy644vUHW(dXFdWXKb z@fOZc5ZR~A2e(AJ`+G=!UtW7m6+zMx`=zijo#RTBXwxJmtw+`0o-HgLN3%3!Q%W6D=YKXrc=Tg?2q+vdxe?ThN)B5d z*myC1&|~g&QlGZ)u~{qYQ~BIoHRF4tY&iyx+mw!<6k8RXgO=C4Fwl=okvZFwaE~Up z2j49do+gbP z;HAT7#*jF#A6G64i2Fp42(6Ei$9QvMc@)o8Ra&#a%cC$FqsEc_^}#U1!FJxSxp`;8 zU2gWgEPq}6h={zI{73idKAqmRLMQ{;IiA(^HsjDYl_i0p`X}pAF<)F_nKq(kR2}so zDn@>L;3HcRFL+R;BgU*o%nQL@vEJ5oBLMoOnT=-AlF(|pmOUnXJ+$r;R|wMTdz?Uy zdYuM6@*b%q!c}N=wmX7AH0l(XObsh!u;{`pU{XJA8u2G%E<;fDyO^oFBAcO>uM28i zjBdN(_lx;NrPVmAHCw-tef&qJ|N6+7CJIxJIyGlH7;=euBICAPt^}->Rb;bopqo#- 
z^1T>CHJefq5UTv|b2;M&(e-ZbI$`HB-2W2JYwA|GEsxBX?A(1x;!^ih^^LHTV_abh zQGMSIf1OQqOCcZ;;db68@fp{Z1^PJK^N*=WlY+43!`FOoa_D7z_b02Ao_VlVvytMt zJaR>FdBaytVQrE4be-O_^5WJAX}>1@v)Eit?3~G@oS}rX+vz_#L(asZ!S^6m#x+SV zWIU%W(d9Ys^C|;wXx9V?_gN33MCQ;h)h-K{Jgy7RJg^&iTcOeizZ}yGrul zG&>P#D`M8$5yl<~FUHPzQct9Geu3!FJy|_@lc8Jtdm$Gsy7M-7CT;MIaGSO>iou_t zNtv?O-)lOQRfO1InK^z?>>+7?gw7@ACx%vb{EOLY=PIcR)IAad=C)(GYae7CihPfr z3wv3M0Q{MMvz}#G>WUB%ANdsD@nF zs=v5pqBMb&H!6Q7G+ouxvrw+Ax|>pCqGMJ0!s(@@*&kgvBg9LgQTI8L81E>L*=M|= z*jt>!^H=37pw#jcftR*wAmTX1_hz;67`F;$?uS{)5-QM3`Ri?1NuUZN`J-dk1GU~aOwRe^J{3> zbr6ZA1w!kdd%{~EtiIhGXPfq*8t2N-aBFQ#r%iQlWoz zOG?-(tC!UlGfpxUNrsQ-2fpQxyy6IS17G>^%mr-mhXkP;13A%8PjS-AntF`(XBm|2cMnUGB?b{9=GdjYhF_j&0?_7tYqQCn-KoFaN?tfGOyyS`R7J;^S;p%?+{81O^xQ~DU%{k2qyBSMI{6Q zw~|cl?Dt}|C8~ttf|j~-|BC_>8FVwdj!$wXF&j ze=oIW7ZW1!^{Ge$!B#l|N~m(M_}A%V5hXjAtvAfCHi!B&S?>fk*LYQXcH?9><3%c$!lOV=i^RlouFTDS2LYuvXX1Fn{sUm{e%qjJ?t zLba7R1HHF=@3`cDbVuWUo*A4FHhEL1sr*sGhjjvg9wL?Yh3Yhp%wjYr z-8gJz;pLik_|a7jV>WAzJ+qT-$Jz3BZ)L@qDC$Gvc?9~C0{(a?!Ba}^<^X>8{iyPx zs*0@aVyF}u>1^=#9~J0QE$ED5_9t+(PI*yI6|G-#nAbaB46@-5jzR5Tu`%p6rop$S zBO*=NlYJ#)`eBA#@MnwK9(kic%IB9B*Lj|i3O;Jkn@yuXKqvBbPhfi#4Kc}kmGTVS zxTWLfN)RHs(Iets71f7=`shp@CQ&&Ch~DnfkN|(TnoT|N{x8;U{|nT0VW|FV%@?TZ zYG&8hj7#akXkN9I=}Udh}TMliX8@b)$#lrp@Oy@xxIJg8oJK2D21OLSw!fg zJp4k-wDyGBy1yYJ-2;i)!+7{1Nfq;O+lxLtl9UH4>+DV4Nj|-P=BsSE1JjKPPDZ!I zb~A~+JM#9*{oboCFW!sIzgWurbHpB<5*=IuC}Plgu<)gtsE|XAkX3^kq#ra=N2|Cy z4Yxu3KTc$|lNOHhx%bebbmfNV{2Wtr_L; zXYR}k_ioiO^Y5r6tgiWA4y6MK7;B(9_-0Pl#u5}b|Ct)_ooF<;DF8T z=KN1lF&C{PwJ!#=t){l$qP_4|OMZFzk8zM^u1PR>g-|~9qXB4)1JnrTBZItKIZk2F z?>TgFFS_2`Ql0f1j8geFI8K0huVIMqfm%iBi=s@$5A4&k; z@Yees0kkbkX9t}~blu!1>gQa}e$DI={vKEP%I}nVe-Z9&fHoynE+t`j#oXE>=gZ8) zD0vo{?zlgL7uRbUU1wxW{2m{zeBfAufqfR_JThHIs@`%oRfdkEps(WC^OZ3#=A5mt z|M1N9y>o4ePs4i+Z3oPqv;%f=AI41_uyY%m8ECl+6>}MOql^5+O&q!7h0Ja@jeFOH zFi2Iwpj;miSB#Dat%EQyYPdGY)Hqh)oQ(dIm$$yq82P`!D%S)z7u0@#e3%PLG3VXX zt!qzKBjo?)dAYj=(yzbeY4R%iJ^Up0yw9E4C(f0Q60826ddh57QCv$9G6;rsG5*~t 
z3nhzzV(R4Qbp)>62GG@2{Tph7&9LIS>P3|P5y|w9>)%Zq+1@*x+8ixx3$5x`Ud{WydZ+CseSds8c+=IW_#CpneZ<#b)aX zrBr3s;wpa}D*8F!Se;VB=JfgLoM^ePvEQyzoRH_pR|0{si0Jv>%$aVM#G}^dDxbuo zM}K~(|6abs^*w<)AO^h7%#Cc-g&L4i*>=UaJJc~0lWtEUyR=)x%?1|{!YStxuRg$9 zoLSTHlno6C)d_PDGI`S0SJ&RuSAhO*sWv!*HBVQqO+*;zCTUd+y&HF8J`1YCC#`PT zfSOyk9Z0LL`?*YM!)aK~@p0Baw;{!Y4Kfw?%BgP7DCHbA;Y-Y4iu{jC>igZ_o%EO0 zR#oWZyBR=)4VUJ;;kjn0zjrP=@o8lH;eOT5K@*+|*#5}JK9c||4T8Z+!DO2-Jr|=v zgqq1cHhytj8Pf6{)qg2ct&A)GMNx=DxmV8kiAT=G!D`ylx)xr=}zx?f!AfqPJS+k2yeJvvOA;Qz2*% z_e<`o(90$=*4~7T$?u<(S#y$UOGmsgUj1o3f6*W%j$z`~CU-@%?2y1<8p z8ys%_1||XIjs?^Sa~LYxIMR8NH{R4`F*usS#_U31$C^uNOnPe8f0x>dzL=)CWSL29 z-z!*2b#)Z%}M?9xHL^a45GkFT3tg&kvv z3JQ1kU9RgJbiz{dcl$bjTlw5;iIAB;xpYqavqE$-$=r08T|{f?Lr75_9ewU&-n&N+ z`(|>5JekEEA1-W{!Lnnm^2LztpkQ^#jjn4ZebMW=%^v(`!`hmprUPH(LOg*8-t}7| z@KuL@B)pX|cC@#W@s?Zm`^-MNvikL_KN&1*IX7nI5(t^5mOnK&QGu{}6jqdMpoFxu z`P&ootH!Hp4V41ggNOy*Oit7=r(d4w*=dd9YsmDiGz;N&W{)_(*SsY~@m2W}d=qqGg1@n@#RqjHobz|74$y{zRON=Ev{68k>T^L*ka)WW%L5r0&?t zt99m6+a_Dq^l9D&HJED>U7EJ>18)b^SkEW<&ui?5Aov&9(M$b7~)Fa(HzBIpybE$jW=GD zN3Du`3OlX5QZQKy7x{i{(^D5DU{TA$3ps(mG>Z@7^4f`IOcj|~z1UT&ev5aLWw3l8 zD}`|R$Ke%mK-<*^WUF#)z5XHh#9s1j`NA!O4H(>SIVtr&VU3v?O*roOh*96}ldQWD9I+zq0e9IlaJZf%bmu`hU2Va5fIESVc)!!KPSIoM}^Odx4BbdbJ_p6@8^bnD?@>#r#MZ-HFEX2=~CD!wU%|2;nAY7_e zA-2~wfd?>Hh=o!QbYgq(Mf`S4nw2nQSFOdtEmql0@f#^S^oVH(z1bulD#_A^znYGr z7&l7kSuTJ{MxF{c*^9=k0B&RX2ctN6{9D133Qj~hD^O`93Y*;#p4ZzGmyVo)7G zIWhr?!KcrzZMT`eTs%OOeeGO~H+m0Z&w=vtnF>Dn$~?60l~XG)QjyxFn?8^D0^L8lE;REta78F9z@wVzQBemngm6tg132>eu&svGsFu|YV?#|n77|kpqhkBHn7H*f+*+5FWo8potN!$W2j{>@e)WtO&#M<`t*sLJPiaBRvu;8DgFyRle`O z3kZu(03`R8d0T~Gu3E6bos+{~>`yiu3*;Hzvv;ggj_*qPkE?>WGw~YLdcP{APHGyt zPiV$LJ>VO6CHRk2@asVGn8uY!;tqvg2Zf@{n+zS*r6SY}=3mGPsJtjnOdS;$+)7e* zoBpk!s+~X%CX*)m5(8_y1_`pa9PSHO4e%Ur0y>p~EqNZ`ccR zb@Dkhq!8%>8zX+c45F}^yQ=y9`m>YFIO}m zGZ|!PMYQVOVK8$|yJn#BtJc;(zlFgNOIym~s4fYGn@|9=($gwQmbjw|J=vqVmV&u#n@UFChR zCDou?;*JG)^&+FH{xoA*oEApq*N;|(54;Q32PR&5udmS|_ne;^C#m}A_j_n+&pFLH 
zNi$s|6Ko~2O=T_(7x#9Y`fOwUShr^c$1@H!FZmB88?Uy13W&NlP4HlRFK>Kch0Yo@|H45Vu_STmr+a%2L_{=DGkc=VNzJUzI&)3G{Cv?(+K%{T-F%f( zr=(o)T*mx|dJV3%oTz8nTvvvCm~S)8I{GDt^q>l>KDD1PdkbfumnjO94SpaKGy#;Z zHXDBdOW!v0*Dtq?A4TrG*N+r>x$CARv{auSn!EAACjwXWI{Ekh@gk=8F5=_;y)CNu z=R5te#l*ifPdGb7{2y~czv0{Axf0w9A$zStHeTGgd|9lEqKYl!8=C_kK@Si3@29i< z&Ey}*m~+Jd4+{Dh4KcL6pPq}pw762(WOkf-gb zTQ!?^^Ky}V6yLvGIC_{P(n3hjrAcyp--LZfhlSLFrrJAzuV_M5qO>9RLR^MwxsC4~ zv9Y$}NyxSySo%#Vq!7jCAkn}h9kJ?|fR-Hh#=Y=CF@3?ITrsP8_AkCn0+7J_PhJRaemJL*-h3xiiL@&MF7=K!( zpYMi<6m)$c+cPk;wIvQ>J>@F)${xv+d*sZy(9X+EcP8!I%C560+h&z0vxv_(rOjb? zc4vRS-nKfc>Wc>NxawJi>%zswzQU~fYyhaJn~VG;{Pp{vY!oIcOn#_4G3fkqYf5ic zm^jG@2QOK!9o&&4A#{MKB*VC!G&=KeP1E9MEK5JBX!TCWbH4;^MS;*Z^Hez1cRMh7 z97FB4HVem$==i*Nza4znNk7I(UTjgp{(-{fCDs6!G?DO#VZOlMk#Z?#vly}C><^dW zJ685_tbq{$*HbEQt>U6aBaitAS&}N0gIKch;V;`W#q4I%S^l!`)Y!AMs){86_CMPH z#uk`63p-uwGk|~e7^k{LEU6Fw=rC;L_H)(PNVL?q|HcL{-Y@|xz6i~8O-%W&~|Jk$R2fCixT3E>&y@O^ty=VITb z>tD^Th>JCw%p$P$?=JfY_TKd=(fg?r)*ZX3Fz@C|)^t`|_Ik4_=qYs?o*@1!w>L!u zWp^Lazqr?I=QpXdIBiH$FR*yD5G7EN_{Ybym?tana1j+*^F@%=kx+Sa9@<33EN*^p zE+DsjHKEB;dj2#$H1zh5{QOiNW23dNa-67gw-DlUTPfa2+ho< z3{O>t@+Y%0ngw4egGwp{fQYY z;LGLwoo1)$Q_1MB9k+?hUOWi;B3}HkMaqu4YM&dUq090xx%sJIFl(uhzx|cx0gI`d z@+_b$DW*3}+habmE{{atpJifL~-g(slAM;Y-Lpbw>3~ zLy}k7cF&nNm)ATTzi7_`lPqR#aipj&fi{JzB*5IyUpbGE%xrdh^r}|QwzajZt%Cb} zZU3#_;&vq}B-XwOqHuWDDL;U|x0&zfn> zcuv08qJ1ik$Zp-qU8j(FB-Tu8uTsSTWPIeonO;P4m<4!wH!_h?{Zg~_8!rCry{u~{ zD(AC4=essN9x}9)V)2LjfT|g|zk%==QGI)-qB!oGoMDNBOE3K2lCJp<^{kL=kgngR zMrq-F#mVwzTsz{{);=y^XulwT;R1j=A_x#MU#`Cy>QQX}^F znP`$=%noYoWct@3nq9mg)mg|3>R7*a&o!Ws2Y+hOttx z!elKB=HQ}}|4%vARB@*`Wkak|7~+LT2~go6L+dl!h`+YpYFpeEj&fR=$JlbJ*G%aK zCy5NchzgqaS&1k%U@1OHHmMWBC%2Mv95Gsir(*lKT>wviBwKM*G1SvpX%O{gmtBJ_ z53v-Lhqw0slb406~ znN|*7rMjC9uYO6P!!FKUsE2fy3W?=H$R4WPcm%1gU1oy(rx4TTfcnKI#r64sjOF+h zFb!hM9N5+eEn7|H4_Sf%8@9pSt>!k7dE<8w4^-F$0>B{8VqM*)!OXesR(p-AmH)&d>rUD5L5L!cQ`AN10`rjn zNm=JIgDmlK}ID2l^Nu=emrtx#B!*yQX%`7)~#7-9guB)t81F=gS4{wbF$ 
zygXoJsXUc?`(h0qitmE?cMAWl(V?(;)~wfDh?6DCUkA_N0~tn%+gU>_`Dy_-0DbjT zpgRd_>`wmlWzeW5FC;#wDXT^| z#mF(ow18esJ+-?#`e?=_U2oBKKiQeQs)80@4AN z%6z}>-HpNE?FYCB6ZhQds7@c3)>C)b|Kq4Bo0DDLK-v8MUencJ~ zYBKWan{mpTJoY8aq4q%v4xY&rVnKX>=tqmDr<&}@4C|SH?fBz@!R{V5fHE6XnQVQ$ zIX5Vl^wrEV#Am&6Y-HSp(ahWA-w@u^D!jRkxcMy=zoMW!AIHWjY? z7*o$M^OelI5grfw9fExYyGie$z`S{@r#e?XoD8iHFu<-(_R01S-s?kf&u!00sl3j? zJkL78jp@@LN<)~+zo^xE4(4L?bZ?l-22gp-I+6UmP@dRQj@jhq$J7k3Qr=9k?>A(; zg%c=F4w6~kWO#umj8B8>i5=$L+upx^tp91dx+dU6PdtlvYfHbyK!i||KYsh|^v`2+ zZOd+MP`RCq5cnvjMJp_nKO|xlpmFcIE!Cl9J9aTnS3|Z+(Htj3VwB4pmgA38IU3(@erOy%9+B>P-2eH$Vry;`wq9gN zQRkH@g7MMo)|Ly2X%z3CyNyI?F*e<)$8?QWS9UNVS=iMZ-^w*bAPuQC*D$#mR z+jkOBcjbWzxN&CFgh zxz2tO?T!jhgiI5jf{w}}_R;L~7iRrWI(k6XSvitY>A$;Z&9yZRnqDi6c;_`kv?@tT zxAde5#q=c^)fC-FKL-@;S$dl?S=uIgy_0+Rs5ln5)Qcw1w8zJiYBhUp%^;4+EJD=Z z=mi^Ubsxc0=4)+Oxn@7EM{EUr3drteEzo=E)k$+KAX)Q}58AqUZDA{chst$zi^OMS z8|2n&RNlPx8GP9>GF)={oJ~_H5d~csn&+R#eE!VuY_$Zh$5r{bGLf|DZ{52{G)>eY zgpU|tyNBcwVCt=kiUG9-au65AtSl~ellTE%?;{THtVUdQddekm%zRs5XmOFt`i7QY zb{9(A`rcffPh`>I!f8frv1EMYHGnt|^4Y~vjH5O0iMVqKL1L*BQ4de>|8%{kmJY35rEV-C5zshuX8QKR$3L37XejI0A}c9>q7jdtAvUcf3-7X93CS;_4deM0UfP7^ys&pIqrO z*uD!$HK?kwtW|P8cXINT5O=Zg=gBxGIMkdyNdLmUHVn>OcjH1u4q7bXS>j6o2Mm(1 zc71MS4FC8L4D6u6Gz=c~Y^whRF64^^q;zok_iI}ebj(=~D3-yWbxy2^JB`|bNsG7g zY^55E?ijmSSlN{u)ofvg1C%wN6LxnMUhR@mo2?_i;8g^?wE?N%2lBkHk|Vg@?9s2! 
z3Lel*YS6q&<7)v!y^#XwitLK7BPRwtEfJ0miXXk4yx3rV1sc0?bu`M8E+rEw`^|G@ zFpp`}h1~PhlJE-#Ck33&1i76j3cuHJ88IM0`!b|qp1_YDTgcpe+x@{-2-qxV6KL)5 z2K2qg{vx@jp7O{xIFU71Pxm0iWZ%FB;?xn=9Oz;0urFrz}+De{IA|4@NA#6G@keTnQAf+RT;1`oHFxegp_Sa6F)qJ_C z{mRsCC4yw;!cbmrbb_~vEu1bQDrR@ueaY(yhU`OX3>9m0{S+5=q-!^SAK_D1!^ZId zmmFj>J7bVsX27y_3G}dFZu(KgWS>ZGgN7bO_2&ea4b|4?!IeM!ZyQ%2w!!e~IlS<; z$~USr4=zFnDR0H>tCPHpRsm7jT+~4^%~K?kfcqioO)MW&y68Bul7^(-H{9jGai-%Z zU9AVSskv6T*!Lm(Hz%zt!j3#pKh%aQ?3c^k^2w6jwIAQYw2`Q~BWIPKiNH>+*0{!dL>6Ra>+#54+)LilFb>!OD)0eaFTIp~D>`Jt@kJi;e{Z{3=i!I&pUQ za7?|Wm=Z4HeSOpEPlim_z~ROz617fu=Mz0?Ktv^UsjsH_4Wdd9hTK?L^%31G*M;Wr;`{YCLp*Qy+<-}f0m+R| z4)>X+QZ1IEH~iNh>c7|cKvoP{qGFIzKLYdR3`{k*t#Yk0*jr#x=!!IXWclFfgjDM@ zB7q%>A<=i2vz1WdT|c!NF{xFL?l$J}JlXitMl4{G#_M`Icqg{jr>y9?CEi6*6(xJv z1CW-Kh^sQsRmXr~@AYfqVCriZPPsMq_jrVVX#0k|>Qt&D6kUTP?NA z^(ltm{Lp2C+nI$bIIq`{qhn#i_x1hl^WpqI{>@mem)=$BCyKFHWR=7iG&X zI(>Zy$!BGhxryR27KhOrt#v~}CcQ+QvO)3Lhmf$pC-;E+NH!m#dumJwz$PX>$|F%( zrc)n6JBd0*zU-G{16V9i<*#j}o{5&c<3wL}Jn9;54EJ66Tog>osz6* zk-iWRlyeJOxiTZ7u5L|c$V5lF71%bWn85u!67VFA0j0R$+LGYOch@_cq0IQfCs(6^ zELK?Z4j9Z>& zlLrOF$`i3KWi$t>yjTX%MvaCZ{jDh`RUAEcz}+4Cjc?@{+(x9Udavm&Ul@L4lfne+ zB5QxhSh=cVdF>MVy~T@w^>3wsXCOGM0QqZuDcly(qe{lEREt(XUUK^JA@t80pUL5H z!ZqVco~*X3RWnBs=LK1Sg`275=(uw<+7U>j^RpT|LsRQh!FSlrb&;XPesEZmgJvy5 z%zu&gkE;wuRRs;b?M7dPi$Y5-*J-rHGj$QsNDG>n z4KO?CI#|-|gga^@Sz<7e45k{jgRVCYgzH5llZxjp#&W_ihh8qcMtVKdXc}eIT7Pl# zdST<>%Q54CrW0UUa*01r{T01d8+FI;{(*kT<_-Pochs9yPg9y z{UXHyzfl)wQ*dBYF|0>;eIqLcTB2a;)n0}ZhOC_ctPco~;5a?Yf@T0i?HHDRXyXMKE?|V2q>;FRdT>21>@U6=;>21rrWA#FEe`6!h5n=v$ngI7m;%|C?RBA1m zT4q%)21cm%GjQd0-m7&kf0Riz8D5&uk@1p>xb>e$7Ci9>6(@GcSa72p-7gxWbnU7a zD*A4$KCioS2s1^6R`jNjMP=W*J=woSEjlI-Q|{oAAUtL?vo39g@^(!A--jRn`Il6u zm_|$w3bmz$Mg4`;>LPSQx;7i$vwpXF9irdc_u@z~r9Sg#{C-FCK;Na92M`E%w`?u? 
zg;ASsQD$4%F$*-wcqJiR{)rJ^h>ykocz@8(j$QS(&`(hMMZXUbO7X*nNf#{(IUl*I z1&u<^JB{G!6S6tDi%uwv4Hw_0`@?5F24we>5ddiNP5 zzbtGV8=AZq)SFSHboaN{jrtQA^=a6S@aYQp>u)LYi&oYDK9mP%XMifJ zC;^&E2 zRaK~_{j}NeunHsn{ymsMHJRGmC03&aaKqRk&TJ0Eji{HpB9v~~)2m)4fHadJ^i+ z^tn%%m-g|~=gt={0Vy!ii{Z^;6>UdA;;K6tR)2AL5Vz_T5$c;1Wek^5P|g>1*kLUhUUS%6FU4u#2^!+q3Fn?EpW%VFd-uWB zvMdJ~T+9$A^p%`w4$6SI$Bq zw7&<6O_fLz*N~~cmBKKO+=U3v+&x>UFFI91k_EeLX!~dLh0-mcX_j;$5Ov$O(xOA< zn!0lF=DA9Psk1Tp2#eL#gwoQsl*dV=W*;u0dT5RA()`!eW&fvO(>?mvtkP8FV2agY#(|nGzNWzUOQ2 zQ4P24L^ybP<@Uqx+X_QP!nvjNee7CvVYMaKOKx2#XLUg|JPRx{Di^bnS1A)rZ_H^O z78qhk4CynJ*|n(L4o!(HMME2?9oN)^rI0Q0owCvMnGupJAYMVSc6KD-eWUb zgkJJnq@}yRxpZ`%iLCvZBz8-oE#lQ6jT%01N5h4A>~5x<0dcojYNKhO%V8il2N1Q` zRP!8mThGtc#onLff3Ca7Ew5VDsfpE9xT-MOciW~wrJj^VGIoyFTIgcG?GD40+iqU5 z;v!La<3r4Jk&IGV2H1xnd>2P?GHBbs6Pw&dbu zVoVc|QpF#<%&G)ini778f4uaItV3P2#HT=qyEQnCl3!UnT)l%oJC0BDKd-#PE&rmY z$BiLP4z^mHM-OsvIx7Pt6Ilpnz?U*3wSPE~EL3ZNQJpNSx+;ElA}{9y=Y&A6*KHtV z9TJJdCRz{7d*)VMOn5P7dIn23PtdZqF271H;g3)Lg*4g3`@eT#{YTjGkrdF0tNx7592a~% zK%lcqO8+t@GmpIin*vMO4-ag!$jO2cK8RJd60-_>=&L~;-=fiw-`eA~T{Wj!8y&Kq z;igcZSKu7xeNp^vVf^T4U_?GUoi1sv>Y zVC}s#Amtse9lD*lxiB`BREF;Re4dnG;aLCnfu&~xoP%RL2hwjNqknmyGizhseLtSq zu|q|zs1Ur6p;k_~r6Y5ZynRH4QRN)~LDd5yFihRX-mWi`kUG-VuBJK>rsw>;j z*KVBy{9*CF&E-WZp_b9s-<%+jVl@Lo5dVZe~ zu;aqdH>Qv9V6cLF9A~n&y+4JBJcgdF+XbKvni?_)lW`Dv&xVC~nIUEaEZ81UM6Pm6 z?GPX0Csr5Z!@z%|w?Y_^?z?X`OnhE+DK^Ju@<)6 zhJL2SmqSkFlL%6*qRvw4oN0Rz#kpaOA!=e*+gBJ| zKWDy-Rbo@AiyjlfE03#+@s>y~sr}57XRB=DW0C?Dp=L%GRNd)anV0=K)~}0?mosh} zwJOQSV)^r6*J|n~OEfGK8q!6+q zUHO3O4F}^9FzVa-u_e$9c#w!@B+x`jnK{ znE@hoMoku5)kDXl@%HMZFiBQX*sJ?aZ#70}m_eEQI}yK%#`C`A{(=&|L}Yr{WvP=b zHU<$3LDgrQx{*7bO|^wAgT=Wn?}rX)NG+e4UAdITE3%+BdyzFXdm7;ava1$&ZfW1D z>dQG2`TFfvG|Cp>K-JWA|FN&E5**e8a$PeX!viBD7Ug>J(OIO{aAW>K+Nt!vI9-->HIq5`il14nTg#5MEHw|4ROf>Ezu{ef+hFNkSKa>o zzpmZ?XJfzVM-&p^zzj(YiLHnfCY}6NAws{yP}U$Pc6EbPy!Lk#T}qzFv0oEeUk%Bm zrQ%5@c3N6{nbT>@J@{!a>(2;%&V0YZr8J*YpIK}srG>IdqGe4q>sc)eMt4eQ|G*$b 
zho<#sI{_18kP>F&!VGfWT8@)G=Zn}s+ZazQdbPeBS4#$Y;XrnWyNTI$jS?A1$3Ihr zdBz?Zp7bEWfa==yh!CiZk&&4{uv$86HD?DC5(uoq@yCdPVueX8^?kdccuU0h+r6TE z6rM8$zW?)XiN8<4@1DFm@c=$7w~0TAqUxYB2<1^Iqsb8s$5nMURZhjHPfU)><{L6@ z?A9@3NiyX&;}@festUQD)|U5BRMKCKWb7S{r@)hXc$b`xJQic{~$P_K(2 z3>$CdB2ZyRcU#3h%oeEL2M|xsXy3QBA_Z>B&ygB~$Hb`TaXgr##v>8GZv3+nqibZ6 zQ}jp^S>b&wotaSwMC+$yuO zWF$1Zs8EP5v|i30Kc~ZHW+g}>{pUEz_<$Fb z$#or~;mZL+Z&YE$+_XFoYG!?YTQj)9l|^SVKBMpB82>NEqA$g71i zg6gdZ$fI2gHp?aByI4lx4R(_)N*22v%lInOUUU^<8}i5V@%{Oo@Q)cnrz$!*>}A_= zbW@)~_HKL{UBZ_VI{z2~w||CuY>w{W%e}f1hC<`3OhijG)UH|AmEKvk*+1vVWEapr zv_qi^in_gcd$~aLD#nzA$n9j^`U$Zr1zzigcs9@QU|pnA52U>R{gmf$hSKTIRw|7R_!e}l&IZOhD@tM< z#pniD1?^Clq}h^?Zg8xxRnjy(WhCNHCcxwk-G0`|Ywft3EIyfhgH2srtK}O~iY}2q zb#F+({y-h}KD>~iH?d?I-)d5<%c_wlUs#cR{QRlw!QP`Ch|@1W>lx$W8edCbq3+zaV<4C-rdedE9EC$^a)>S)NttHmScN`~aflUM0RV%oaM10AJQ3nhmA6JOJ9f0T!NeqRWO*^mEH5f zt`(}ztdRQi`JP z9it>4Zq{5>`Se-HX<;oC{wm;1*2#S;pzP(D4>C(N@G1yjrPGT8%Er?m)}kV{iP5_G z)o&GY27R)A)Vof?m<5JzY@>&H^Er8%$A+zx%luc-{GtOw&*3hgg9HEUqKH@Jodwz9 z^7@ZV2kj>ued@~vFuG?tn-MC#(M4f*$-u9q!_4@w^FJ?w)qcV*6<{Bu0QkxM03-2ZaqqO$0Aqu+OG))D&YZH5 z0R@rNGS0&wcM3c!En;PQFC)|Nu#2~09jl-ZTr1aeZMLP{e)vOo5&4}q0ZQ}46dtiGh3bR$cNbZfI<&utQSo57R~3fIxT_JwYb^h}y`{GdCob z8ff?q>S@CzD5S3tY{0TiPLZ{t4j8Za$8h>itIm%*!sV5z@Sp@c_4RfBUvi00Z&N}N zY@_ZSf*405V#s)ZU;P-557rtcCiwDAkHWJs`m;`VIgi9z7fyAHni6Oc3@umJ?RnfB zZQW4`$hJz-1UKVGjyu+RpG65j!4OaQG45wNt65B(zU==74D@x>YZE~I0+v^p9PgyZ zMdAujhpt=9z3_1BJxuvn-ze9WdPg1yh-N6V8Os}`FcF^8F^PTFdv57_MP~PWw7x(n z%&W#0lT*>`$tKqlD;rQAoM!tI(r0WL!{ZUjRi7Wx%dlR21ZXY}HV5@mXmOr&l(Lt{ zTU_AkM?lh&xo;==>QJ=5=aC*W&SQ={o+qG012gPMOOYoY} zBfaiB^i6ALLq4MUZ3)Fa_xky2rDgY1sbdKv-QHYthM>Nw)YaU&D84ggy;%YU0&>t_ z>+cg-1t)+RK&HHAz_+52Ay{yepC%29PcFke2FJf!t!4}-VxnbimpevS4o2I9R8+3j z47Sh!s`H{``wdSxjNfkM06E(#jcP01kDJi5C5QSxnn;SG^?Ulrb8?hR?BYDb^e#3R zGZuXbhHcK%2ccU~DH{h>bJURu4E|5)0g<%saje8*;m9c87G_?~CV?ma(Ybe}JY_NZ zYJ>gwh$P!F++Qx375z}#)+e7LWZ zDppuj%86ydRibhC{LM?TzO60=T(c~8yTrY zz1WgkE*h+V(ohs_{tl6{Ok<2Nc@{%wqbs-9xX-W#`x3W3rMsu%kAd!Yi-=RhFf!{t?8 
z<%x0(}X$n>Hr659DFPfjSA%(+S7M06V8cSmFMOZa{ zlpUL@q*<&rI&yl9XMbzsOEd1J%cnfSKg-EJ$FGV5SeNa)eCh@{+}SdRw&+YUVMBCv z)kve;Air?aN;%jf{}}NsCtF~D({gzH-4dFkr}g?3sl@8S>&|%p_nklgY*`Ab3#-B< zFcjQW>m7CEL2)jM2UNG}>eTp5k%2^=cOtX@P%d>50Bmb5nCZ}Y_DBA6s@pqH;YEs^ zecH=eEbIxzcbOInvG3uh_x#VJ?d@vDqxp;sL-)qcrJA^&+K)W+UPkrzly!?%6$xTk z#tyh&d-YV+qHcA%_&Ryst7Zk-9dCQX)-Em9(|%@nuNM{kJJn z0D^FeY^#DZOhp7ibNTwN4oDh_$YHWuLxo8a%Ni1Q)_2Dn`#w#j|6J}UQ`?&cplD)O z7QA%kf)5j}AXRUiPy#5sMKu_N$>gKr642AnLwm6oJZ1d6c!U2r47nqb{h^?|D3FB{!wy-cm)w z+@03|ZFlH?R{NNy)8)KBL+GgO*R6lqQOQR1P0+{!x_sHdJ^pn3>*H!N)vU(~2~G_7 z@|l3F@%>oN6s~uczBd#Z>YD!X>J?$EUh+8Q{r6eJPQQf7Q8M>%JJWY4XdI?l#)h8! zRn#2CdtdI@!X!eG{GZ%$#zyPHfS^O z+x`eJe&U%0k1YIZbitm+XsoKQwAu2U$5%j2qofY!nMc$TUe+}a02E{VR&3aTv%GDU z17-;iqKjCvIh9#LWAA@|coDP)o5DCyF5y>M1Q1(jI1+xK(y0sLQqrOe%FRBj5aW{P$Cy8>R}gBJIg17N z9oB_@t@izBZKNf)KnI@fH6;MBc`PHols;3$$k-tMZ?Q~$?hu$cHU@`Z-phn&Gc(B* zIRm`!>%;(&XX!m`RNV~qSB1V|&6K=4`OyjVI*s&06H3uM_}DbqK(sxcby;iGv8!X+ zHg%JJNebMWALF}!DPbF9BI!C?-anB|fB0EfrIu|bNoYJqt#_rM@Ii{{aO0mmC#Ewo zM!(Os1uXL!gvYd&sJR0-65)$O=~-pg{)UP?jxQb6iQ>#~)ORV*&p76di&6?yOPNL5=R z{DGosPq8E1vk29UxXfh#@QrJ>r{!${9Oh{z{UN-dmpn;>pYHWPgVdBJ3rq?~zS3WS0ZGr2_J97-3D&pY~``_V`8Zi{9f4xI%SjWlgc7i|Lq z$)!-JZ3DQMEsNeZ^S-lU(-g>ekA+;cJrm;agZ#qf|t8dBB>7>?+Rr zsOa_t9ec7J-|4OzBo9%k>>8n#jg|^OSl@!!HS>vC?_9*QGo^0;VqGEkzH?_anP1>T zr0Z)?C@BJzjA@TLduHu$IPLZn(|1W$Q@*jRR$8L(n|co5^%0{7>-Fx#x>HdhaNnMl z;LxGK*vNvI%B$k*h6lLXmZ4K}y~Tdxz#EkhjWU@Q+EAQXA=x`p%Vg-lPUSBJrJS30 zEjM{ee{Yzd(tZ#Bje>P2yukUl=Iyf-?c*;s`8(tzkFP+4HySkVO@*N@I$;!m*h#LM zSu9*|1{7m-b=E{B68g8|$-J3N>l+r2V@{&I5f#ois4#OY;i@AD!J?bEI>xITmF1e_ zX|6Y=U0mCE9b00D@VR5I;>m5huRFY=ldNuXbETWjgygdisa%1?xLyP}^7T_X5AlT?=-fS{0XepyJkOW)J#YkUTwhGbWRP^k_nJpRpt!*|$_x|c( zeAd$|KPm-jtkt@{OVdYH04n+UK;_8Qj?&sZmO;x!!eeVUU4;L{ap-RhRdVDx`f(q5 z9RZI>$xX()c+9ZJD`Z*+?E0K1)!*^*wU0?1(ASp1ZE8n(y@T%1Y_I!z% ze$0)Vj;E{j;WbDe+{fS5=%Rg7EI!eKCL?Eu0QJT73kspL(HX(Zi$=X=l)u&#jou6E z>9@FhxWZDC(xnTsN#3UzN@C@s@$#_n#Jbm!JsVdmpo`PB?NM(`ifdW!Sj`}d$e$mr 
zWQnm3@nWy-yE72_q1TR>tA0HbZ{U<)db6f@JmQ>5DSSnc&P?{Vk!b#v-n`Cxhls@O z0`QE3>T}046>9X4DXLCZNY0lD#WzqD48)JrbKU06%vHZIf!3}lL8e?4^&(VC+-aHP z)SnHt1+V^wyi+Ra&DuXRY#i*lyM$(9Z05G_`3pNXiQ+XEyh5`O5y1#wVGe7%DPNJh z*6W^(y)b>Mz`5t76aV*p6_94RNr8{QdG9gtbmYOI4XuFDTw}(*9J#ylB-o&KeI6v0 zSkMbV^_MR-ER*Hv#~>#@{Uv;ubm)BswZmA=sC(5nu3PA}0vnB7y+tat4x7P}?!`ogE@qc6mx%1(W~UtYTXj-s z-7)rr`EJkI4~0%r3*~!THQ+VEf#t6KV3X^mKlUh{~ouI=Qr!}L-DY7$L(E( zB`S_Tk14IhgC#0>HrDejnyfxxpISX1GuUJ@>xxIkAhi{bJ8Rnnkg74E^qh_u#ROiq3LfUZkRabO)`X+B<67h{r>l%>x4_dir2|v zFvevxpZ2(99ZcwdRkt%FyE$)ZBCfMif_ooP7i+VNZCfoRO>m5Sr;cvrD1+M z2i2y@;Xy(lf8)R9w26sU&Sn5*Ee)Fk)|}NIbmL->ubT;Admm9#i#zXhT%V3t&F8-u z&=Y>U_bGDd&`Snw_xf=IQ`#KHz01h<}5yH(q%x_*x%S(y;X%jlM5)`J;k@cWRA zOWRjrUvUuL9DQieJFx9?b}Y7|osA%ddWt8T3&;n_W5_-6C1%rJ*#RJNWcpl5mdC2X zaX)!l%GWxbu-OxXe2KDrsNEK7o+GS~v-=@k0V@F)UYSc80^QJXWmSGXo^#$EP^n}N z)cxXsHu2oSZh?|lm{zPyrA~$?!n(U$+tEmH_zUS2M4#wy3+^@##m1B;a=x?gee4?f z|JeHKxG1--?LkD2N?0@lC?z1}&`2nU7?4mJ1Oz086c|E~P>&#uLpRD$QX<_T(mj;q zC_S_^Lw$Sjoaa5y^ZWK6C=<-yYp=c5bzN)Sa}WN!OJh?uSJg_yzh8_JoWBp>Q^LPN zOR`Tg`%f=fUUmFR!SWmm<%%Qd|z-|S_mVvezQNRn|Z{un1mATSi3e}1zX{~B~w0x5H zEJU9h#|JN0s^TSU;)KrquL>1GLCFK+kfc)8gzLLXn|=d|Z!BD*Zbv`{W9g;_1#WgJ zz4fHlFFL@>@b))@Zc<%I1q`;T`w`vXeN7xMrD04F-tsa{h17buey-#8TlyW)K7w+o zelT*?;OjxN(w^0Tm$8)1M7CsRmb?AU$GPjmpuE(>-?IHzz5{o+zf1@?h~GfL58SfS zgVwwZ*20Jg5$~pTiA4*-%a!r`@Bkih?1J3OOQ>Dc_CAzj=bbzBF=1=GF*=I{t-wHjo9RqJP$TS6Hwo zk5Vo9Q;3o%CBDVH#vy$|Jb{APJtaGH@r9>vk6!X1jdk3u@;@siX>k2_)}z_vQ?{E@ zG;$wTt?%loGbalab!Z*c|-9lw}eK98oUs6>?*^>`(AyMaM!kC!7Yke8K_s~T*0{0&V}bMYP0gUTIW@IH>&3uZPuC72PK{GKVo0>O}l(V3Ttv@lwP zh?`)P;qEGUb@+PJ;EWI>CQlA?f~w=e8N+!(1sCYe!vtD1sQ;JIvv5tSZLzynQV8l^ z%*Qo!eph&W&1V2_0Ph8DpCxbNok2INg;OuD7Xg(@!}$A}lRso~sXri=N2x7SAYB?) 
zeME$=Y8t9vj^8CzL2bl-s@uye;2j*Z>4rbXAQ45Smnsa}30SBzYikD=d=Dx?*E{VD z7kr${I0-o0?q8mF7<6xWDL&&=4*`@1jI_oOK@#(>QxNvc1?L~Z)s^w_Xi7N!^F#`J zI}MB}swZlj#=I-JCGL8tprb&`hRgr@qU@o5j&iH!wVDoBzsl)=(g7|m$yqsqEIa-6 z{Yy4m>~Qu0yLwdxEe>}4if=tl_Tn!d6^RVazWgw7(M!^=68nh1QU;gaqSSB~cNQK> zgpG54(aGBWOW6vl<2;!fT5^Z|P%k~HAG#zilU&$8u2NUVJ5ui^4LX%dHoVZ>yK#Uy<-qIT;u}{CG2MaFlBOsLaaF#@_MAWa?|N6Lob?0 z6!KuQ4vCLH!3Kpa9Nygr@}q;}6)XLoq&Uu*r@t_!dCf}Jq;O^!;p?h zp*E#N)2Ku+p z8-n+eU6hI>tJKuFWn)%Gp}Aas|H+@29wlGFZr@P8+x}H0w96+i=Ray!ntc1cgBp_N zAyy$T-U!Ygw#0F=PjcacKQ@rB-eko=;8`jLh_h(#Wd5*l=E>Rg?b@yax!VHzQ{wN{ zfPeZvqAN|=l%d|xxdieBRzpd1(X;9b}38K94LvSesVEU-6Z~4^k zIg^K>*((xC#7I5$0QJH8&#_Xt?KO?=dtZ9_{6WWA(&3ldCbeEBsu@|r>|{ej1SM(B z>u%zl;N@_99y&kS|J;P9>4$NUp4nUC-h>FgXssl?5)#2iXD$-Ctu`Xw_eJ_?2oCfG zo;^ev{FAsuI~nn6fu;F_=qi3Lile6%__c?|O|npxDg`aS2Q!RM;dE7r04 z!21q8yYtl3ZyV2oD{ic4_iqGUz?%eMA&DXqvFOepmK#9gXCWLVPdx>{053>10sr+Zr$O(;?u3gC8Psk{Vc{ zndK^HwBnZ??axxEQi6<7oyl-WPlECpgDws~$cuK$eSQ1;UQOUFa&Y@_uvyPq;#Aq8 z90(=MGGi-|fxa5%{xVZ~OWbAl<6C4+h7-R}YDs0dk}{T3lZ7uCC4C!5Hgccv=Cc9X z8_>W6V}z*s@f)xA1$HV>ko)=>eHCbjZspe6yCqGeR*pM!lXr4*>R)Yuk`{5L#v9XY zl7YHi-UIG2YCFvRfhd4&pi3`S(a%V_j2BLQ0HAN6N@OLph{!H9a3lMFqQdliA&C*IByWbO6va{k9NJ#|`#%b$ZazB-gzW?FLct1B52&rjvmnJUhNId73VvbGO%0I~hDp^xtrv&RU z*SNu@cR{Gcb0l{6|kb2pKeBm(EyVyo4B>3Ac1#0^!)jzgGtQMJVF(V6czJ3;(Td#Ahu z+O>y|nNKGcBw^#C?Un<6vMe_T`{W!iL{MOEvVG3}gIk-3Byay7_s7@J=Aet(IPQ

b{IQDr}$v~|eZ5Pt3JN>6;{LXvlV0Z(i!n@g{kD6F_RF}W9=b{e>i?ttQ%~HEJ8z&ptUuo6}oFEO70p@(NDVTp8~k-J0TW4w_ihgMAD*d`_~D&Yx30u!0SNHvTbDuC%CV(eIoelQLKUOet1nAUg4*p5V!pD? z3_DDUSKXaBD<8F$@nMOSLd{?Pt1Rexo7Yf1q~Ze~dMyzVYID$pQrMY`=K>rj2uzLGE1ZAfiuJ$h3u`sb`uxnEB!LAO9@N$u%^|av>?^g!Xb}#vL z-LEWn#Pqo<80~v&q7}DxysA5clrXFp!!J09{6L<3O`=#979QHK6X!xERCjH6lZba4 zSuW-MoJ2Ihw+xD3oKEKQSMJSqa(KHT;d{bl$0t&0dLzOC628mPz#`dPsDB;!o%Z66}$3=T+X^S zSr{l{ggV^MKQ<3S8@*KgPppEta`3V`AVzyUuPaP^;p7$lW(B-Puk?i!`Q%ph7O8xY zNYr-3dJ2sy-a-W(9?FYzNms$!Yogi1?w6O)Y2+R7evp=_Leu0+j9gWzkDZR?5l^I@vd>Bbf}cUAm%^61T3&Y9Lwv_>q_{-cq3~| z&3k)i>&N7GxswXZ&EVF9(TyK@`QE2yHX315ZM=e%MSZ8@WgI9 zQ87(tnb?w)^-RvqqLj9P1njNu+Y<%YzB;#|OO0-=7#iRhbD*>4n!jP`JuE3Ok0djz?OMSx1|X)X5gdlG+p$ zAEv{{DrNfAj$f@gzC+r(pck%rCSu!bhBZt)+1n;A?UfO~@-Yh8UVrJlULSC&h>1I@ z#H}Aud_SvHMJ?SJ3VFi#&69&>nfa|=_?(%xJB`4}d`-&N%AuOGG7>Gk%kC3894?{u zIC6mzGKYvoCZHp`SLoq1f0^$4EpQNwA{!Jpnt*cKn|w7WKG>ypsp)zA`Wfm!vsjuM z+G08Is_5Wb;g_UQT)ggfkivu!Baf&2%cE(<>E4p<-LXQ=?wYg43^o^cW^@ou9|yW# z$r_SLgchuEU-tho$1>b&C@1w@WAatH*znCQuS+9cbh<|_>AoYe`L(WCRpBBKCD}0vxi-=UVbIJC-i4kCi`Hp_Sl6ZN42U_ zLCo6m&apII7;7vketL11IX9)F)swr?sp2Mnsb=kKd|d$}<}Z*r1k{?n+g%n8L3Dvo2@Rw4qvDfu~NMm^O$S9-xfRSpW*x z-y_W!n1hRVT;K8QW#|_xVUTz9HkbGzTrMu(>3Wy>CML&Co+t)caSi)=i`H@=F->pv zWE?KLIxE6c8H_OG-5*kfjZbGfAJfJ?1O%tQ>~@s!LLN{xmfOpo56-xH{G5B2#CITu~#Ce#DWm#eTBp=Qv z-=opOvrZ54RNE~EnT`(VT2X?{)Sb1g>v6$cP+>d3EtWv+{LdJCU$P>lx(O9vd?|-4 z3yB&0A^DLEp>KJ9gQ7rxj1k!2$VngDFe^69hf3L)+*T+8cZ6)M`oxYIg|a7)JsMOu zkUP$}lEeRIMs&xrGI*zRULz}<7dJnzl(Dq7xqUlqIH^8ug9h&GoTcro#}~7^+SxLY zPQyFgtr`-Q-vJGMpJKIG#BF@h|HKD+UD=9H1X@2h7=^jnEX$*?SXU)bm>x@F1K;PX z5R33h?mygl-}x|p{|dC#s%x1mX<3pO^0GREOL>9R1p@hz98Y5DhDQ4;Rl#{$QXtO; z$l$Rm_}F-;l_d%G3=8{J6-S~Gb*vEvl8K`?{tJS@o8~ps(QeqQa?zpD*J>`i2e?CN_XnoVX6}Qq z&WL8d;b`6uYEVN=DerrHuFUv`EZVxGCdHqI4bcUBM~{I4OV zK9$-w_=r#d@Cros-f1$PIQdjrEs?@V@#3XLOr}ow6#jwcZDlW|C!@T6$ z(S#2JBE&eKz94hx-NLL5VJsA)K{+)p8X~VtI3Dr(JzZ67QkhigY7Cy_=QLCX*pBru 
z*Dc*{nV*4QKhecasGk;aj9*ZISNN0hurkKbFUNpFLP0~GRw9rMCmS>i)+_LgEa(p)pUmCltRCd;{q&w&RwSj0a0 z{Ym665%@1kufyPRFTVyCO%9ZN5l=+Z`ajU3u~t(>lor z&(;L}-8!!SYfzz00eKaKNCtz#8HUti3xYdHErYzW)3w|qa| znVB;~tk=ns@=|Kip6r_0AQjX&dAo77-bM4J{&@Gd;fdu!@?+)v8k#NA(yC<1AOs@~ zs}z<_(r+h-JKmQo2e}|yRK}KkvZzz96I4@qz{!BG$0EaY$V7=6Y=<#Hk6P8^M`_rO z?~yfWc{iSEjuG#g*h=wXvP=1_2u`^BKE?m}%2I<%9dSog40-;D;B*WxLjUSf;W`8| zBwCpDlLVgC#U#1`r>Qnia-q3Rg4NPSV$u)v!s|?*X2GH|sgj4e#8tV(^R=CA+P^Pu5|3z2v*`L-2{XDG-=7{blD#b2II*@bu<1H>OlPq`eO z&Kfn$Ur=$H$0XtEua;jF6`rBQbAgJ66r)1DJo+S$(*-2_9?Lzae|iC0>GK*_S9)5M z6z@MJ2LD`;0>0E8{9&Ok|rO+iK*=<*_#*8sW6y!g>ggxg`gp0Ntx(Fz!O( zhZoVtsIw;{EUf}eJ<=DL2)-$6?|MKXYl#G@nDFb_kX7z|XJ{Z&V%+-9uncV>|9RXU z;x|^ASN%a3(_GxjTm7w>yf|s#ElOl_Quu}x$Btf}*Ur3Yd8x9RZA{YeAIZbF#Z@mQ z`&YJVmZy~KQPKcR7QU@6(_~+G;LXLo>f8J}^Td7s7kqV+KR2Dh+u zg^9GvlDnj1HY|Pg>vz8oQL7}s*Ey0R%iDKkhlkIzO7&e?uJc#w{DdTBKw_lGi`gnGKSu;3qt3U60Z(1Sa1Hp=;)OTB|X>8h^#6Q$2cbVi|XxGXDWH^ z>Ca2Ag@h)#;V41u#J{-XMe4HPy0CB%MzDZii2#ffcLlxxA8P=1Lyjt8$SpyoH1&_B z4fVP=XGR*bu+(Ho@~9a|Gvske(L+3mqz8fT-MM7M5(UgB*$NBGjcgn1*f9L zY&e1oZ5A>0%HwsVhtF*pu{UFi0CkQPyXVmCptbYE_T z;BP6#ROPKhFfnKS9RKgQcXOFWhs52s7k8dE^{rJ)aY*UCE17$`J5@}&d!ee<$z#S% zctej-zPiVxZ{ziEn5T3ZHg1TaVYi>JCXxNL%7WvARo}+pq z*@JhInmt80Yo9vRlF#3}s@TuBMI&^teo1D&CudffBO2X(SK##x72q-3ZpYLrk`(91 z>P7p$2^=syR>RD{c_qS3HhBG8RVf#SnVuD}lESrLPx9pN=k>!2n?j$eVP?4}sJ~b~ zSls@_E-t0@ZhLE3gP+3m-qvo5U08+dpK8@W#`M;lm7G(Z7CYH^%5~B>6imOwb9Q;_ zQdGSCP24$n@yl{ZTNE+dsyX1RNMl<6%ig4C!473^`3i0R(w)U6jEeOvcb*wd)<7f` z^X0>i1fXKhK!ch;LXTEHT zk69)CR*tZnO(=R^&n7U5eugUXISKr&UPrT<8qWpP%FrnHTuJzPfdRKfPAfV^G_)rF zU}efrKBwYv;k6}qzcDMra%=ud+oX~RWyz~!Q{ z_8Ex4aX;f>X=X-s#1U{cj3DZy)&2i!dO!N_!o5)mF9T|ViwKt3fcpIzx(n6t zFS&E~+{SFj+k@vm>OZGmNF8|3XxO7DVz=F~M%h&;bfrq{p5te;hSf>v#PD*`gf4d_ zW)e>{&4C#7vrik!>?_Y(T@O zkf5Y66B`M~M`{e@1^C?~J%7jY!@Uca?$x{~n(mywogh*uS$`Zv;2c0P{S>E909T%m zF{5S;(L4AdDp-lLuW!-J$HJ=&Fhh#26d@O9cb$8@)iB6<3ni_*R341tJZ~3&^4P ze{3;HUaMXIfH3d2KVaj=m~wXewQ3Sb$@JS3uf)+LXtmuzNqOpUdaS@s+WpU9|#E^SyhwulM2w3s-x_lMzu&u4K=I4 
z`{hE_x=#1JjMabU{4~0^pQ>E&a^7zsP6B$1AqYSkCZqPxnivIn3~6kF2=l{MK+I4B zt+BJ)ADan~F*&rqI;%#Y1oQ`DC7|qx{AxaMCGv+ApAa0ZmZt`BJ)%`N0Mm`dV$=!m* z*P8^6($`rCrjZ@3dP&N%^p<`2zaXAj?hll=N4TR^^#_Cb;r0@fR3cC(I<2Q7UCCXOa zmkcazRITN6VmSPH#wbmg76Gny7g6xsget6;b+_4{8!a^z?DG#M=B3aH5mi80Qan3! zljK5<+4SLF`3JRdt7KTG9R!i&H%Fhk(U*Nb3t`f6`u8yilr(S7kP8JQuLB7$PZ;fG zOTOxF8vc8c4U(|p9d`Q#fIQqQ;*+oWo2LFtS80eWd8xi?EjWFFvIWqk0W**~zgV1n zJu-uf!mDtL{luYs>tzuW{irGaoPI~>)O4U&*aDx2MZ33XKr&VSFR=l^=qEnir~%LB zZAxa|OJzS<7urWo<5%_OB)j%Iuh8~L1z#0sx1*Nc^RH+m<$x3)TiR|r+oGI z@2sw(DU|_-bYj~tnBMQ)^1ReruwNe?T3*{yD(rYG$G#%gF%S*K56C>EhF9eaDj!X- z#851eBcB!q2ErgaX7!+N{Ki-1Fg-`Lg&*55q~5loY`{v$i2;^CN5*%*-*$!10PrDu zZZ~3E9d?LDult*}EpQRemj?50R7t}6A0f2WiKFxY$t_6!OC5juo}g1$sHj|Hb@<{2yeE3rH7C{m2F2vLGZGulI6nT2ufEP@%m`Bi$OC;GR9!`ahfhJY$7Hb2B--}f z4wv@l#xjuB9fas7} zVuygE^cLH~N+vci&WaZzoOJJ4uO(t5eV>D3%Ig=C2ce`YPB+7*5l)P~VN@m@Ilb=Y zp5KCRv|4?8RD!QrI~v=n+S1jvZLr_1cttG^HE5?R#uxwEUMOq1-M=M5hs0xboyjVQ zvJJiVw2$_Y-yS6&ufj_p5dVb{Ra5*(6cMCNjG)I0!F_4KR(!Fyf>XwiUbJjk0N=v| z1f{_E&}d}ZIz@;WNZN!)eh+o(1Hu6)D`A5ZGT1|dmOlvd7x?;QNBP5TT)VEX5g%O9 zb+??<>2pDnrNqMS`;u66O>39uCX%srK@z9i+$QBd_x0tIU>?^ZNm;6zD4SzI{^`Dl z>H!HjJEE)l-n)!9p2W3*Y&zIDWtTR8s0lPVAXWhe8Nk}{G@!%+hYsPRVlAy8Dx zOE~asM%>N1+(9RI9q-RvV+Q>xJRpqe_>y$WxX!NmD?K_FY-|4>iMckQ&}>&@0(oYr zhGFu4<(8@D(-hfS!@do4$>E9iIbV)K58p4 zB|P8o$H1Hng3*vlUwqd6&VPT#}ErXA{QTF_r20h9(#2Fk-1CrK+C)L^Xq$T-1ve zp9#`rD{pzukEMniQa&@!TTu^^ZDBuvLE*rT! 
z8U2)+Fk3VLy8O5-gI}c&A++rhY(}P*UT_I!qYd=YT~ve+ZYi=gHQ4Xf?5ZZaNYV{uYhp6t_9};sR)j5 zMLOOW50C z;js)SHoalT_c>DbYpek~(H5C5vo%RGkB#5FC`=@qU(&25xlLM?&qcpW*8?chIJ+(h zgf=KQQO;ej@CEc8KzpcoaX53#wl-Wf#RZauNX&o*HSEs)DC-)>`6e_=mh9-|C4HoFF0t!kcDkdJu z)_hl7Kd%B@fG~?s5=s*@W~-DhntqF-mduiO$|DJy`1t*!g(>Z}Nu>aDK~2?3W^I^* z5tEp%STPDVHNeakJu`Gn-o(t;(Ttg}$eg^pSqBlgyE>{n`YsV|Zxzm?R*@%p`uMRb z#BWpZ#|4O=ucgQ%OgQ`MDgcQ~gF^7VxY0@!-ycm8y}vDCY8ev?a1^OR4IjV?1mh5f z-}?+KSUBcO27BPu@;eAZLCb=$@Dzd%8>D?ifAk1>saU{OU&*Z(YgoQmVz7Aa&gMez z+n=>Mc3N!7RbpAV47(a;NXU~&xvZUFweb0X)3H%miTjur?b-x{BP$U%N`&%Z%NYQ9 zauCq=E%w^hu6cjc?=!z;fxDI_AOR^p{Wjjzr9;`YQU5Q3=0~qjI9B$6mfQlgu)(+8o8TXehsqic|wiN{+&6h?~_O`5}(Qze{N9;=HN(pC{abV|iPKFFTSw3n1UyJkT3+V_}%F@a=643fRxI!>%Kb9%~wpdN4zB zuL<246mSi?WLuc?b#^FfnU#7*^0-K@-mrxF{ zVi$Hm3FM?x2{!k;6gPSveu!U#7`x`7x4aeqSrG&xT=E2S8+&#AW21Y6hQM?TlyJcN zfVvEckmf6!C6#p?OG&ol*vD5oNYd6)k2>ESO_Hc*X_vatk?)S~#7?fj`?W@<) zV(dk9XB3v(sW#;#SeI5cu}$GNqse;+-0EKT-U%t4tI8``j2E8RXyEb5%=EaJ_alrN zT$l*b(dq}vuBGOd%u(CC87o;DM6%GkaFdvl+IZ-7|DR^RNlk>AV(_}D;_>%}t}=?I z4WECJ8rjh8PbZ2KgwslL3Q2(?_Ny7UL1jsad0{(8nXlf(H$p4OJ$$}%yvjNZ9nL3g zFAc1*(FD=$ZQm#wl9CFrp*v1GOe~2+@20NhDS%q2? 
zEtdNYj%3H;Ms-k@`)iP?>Dw^djei$ql;}m$#$EzrFEfok8uh$yPqt41vP>}7_hMA% zy4V%;H1RX>l+NaLyp<(|n77X(wd+lJHA8;Sc)Jrk<`qjC-;`TpveY$wLgB*IYrt(r zqEdZTmM3>rN^7kG5{&pe$_;%jQZW(>bGeVBxZ8s30z3MOA7NcK6{p&$7A3e_#Z z9Oy=F;cQ@sU*e;GvFe?fvyfolyl>WUQheAy_Ju%f&>*JKJuVH7`J|WknBI6l!Cz7U zgfublDpate?#~K!sTVJ(_=$@3By9B4G-GE6X|sNE5pAp`xzI~4T*>#v77G9DqU=;C z^|D1wJg*wA+%(H~8Fkmzb8&TqdQ{WBc_q;l6t-yBMmli4g9t*_7-Oo= z$FdkocH#XsBEPG%*HHzqVy)v68N%c?8oTJ(x_2juYTkgBoBvE#HWZliw%jLw*q36v zbH2GCg=;lFN+vdEv%fphsm>>^N};(^V;Ai?*`dhS@)natGgsn><<^<Xf7H{jY^&2%E@Ue`qjcN-M?d^jAdrlvBhEM?&<(iqL ztx}a7=2x8|X=DOk>=mDYp{}-JrvaBYTih7+kIUBKa~iAdR&$fy#+vZ#t0z- zN_CMO>N+2+9u~tu2Uz#7L@X1H%K9oLa(`2n+^K9z2kv}OLq7s;{I;yFY1%y$ohoE} z-!M0?5`I_no}Sj=c-CY0@F4yj>mR>-b5DO|3GJ_J)eSuQ;KbQAgjP@G(``Fc)6Yq( zgy$ajat?~vDoVLv#JP9wY$pp%uTC&8Cd?~Q-Mis4+g8-6R&RFSRF^3dw^a>LiDj~NBmP%6Q ztC^II&{8z|{<04!%Dk?a%r|PJI|;*l7FIRg;#u>&mf7Cn$wOb7hHa+I{Ga$Ns%tlq z6j~poGGj2~Yr_~UHs{spJ>9nkT5m#aMhvjWsIlGeqwdQ3W0tFFINu6ob!L}Op3L~H z^`xuzcC#f;{kWeG?oF^JRr;^8O}_t)M3}UmImMJ><;7&GR4en=Z0}y9p}K_vOf0%C;3P-4;GopF za`QqoHQ%RuwA%e8Kar{jqoP`Rw&YkYIaI39{Q-xg_>--l>NdX$ebyzd{v6Mc>1%rX z{Cljs+QK_fZOX0$SB{n2`UMMiyBq?U0u7XRVTL?pP5gf^OyaKY z<1!y^&M(%iuyJm(CYi&3mHreeIIu)=jWYgLip6KkyY|gt4+MTAUy;=5^Bp~YhKG4j zp`8R~L~V7!Km{dpWSif3hmd{Md!56h_^}Pg`jOc}U$xMIOszRjftU8x4uuF&ql7A= z@{x*`3#teGBSO1za^6`X)`sNgZ34L^%z8&>F6<{=g4nE#G9H49bjYF!^eh@;Rj%Y|ER{{m0>xQBk3tG3J~XovKG~&%bJ6^zj=$9UxEe7*?bb+B|lv3rY6M9zN=!VTWP4 z0`-qO7pzw%TB`%K z7s9B?_xMOjtTr3|QU%=hwtZ zWpS{wdZ{87!Urn7i%(|3all{poP*#>YSGnQcEZ0wh=#4@@EtF zvd%@}ul~c;5A8I`3zF%cuVrnIf1DF`EGt?^T}8B>T#{)fw<~<6O!%BMXUqAn6<>Cv zJ144?5R8L4+&y6wd;>Y*ISq4C6+oql$oi)&MF8U_Ja#txkUvP6!FwILm_~G;W4r>q z4iNHLVei0x_SH0#sh&1z+a#90S&wpGNNP#lBvP$i2ab`jpWpIWaq5;L zO@HT=KBqcjKhrk21c7h;GyITkyPm)ml%i#$Yo8;QiFTQn8spyV^_MXCWFFaE_s$}N zcZww1P8#y*4utG@XFYq|WMFH(3h+b0haZv|9GkhSg9forSYm%QR(Zm1A`Pnb`deSC z&WEfzs(&JdY^O#;d;HW&+zO07~Qk z%nTqhiaVD8l=i%vg}Ur0)u73MZAsns$$#dX#BrxFCLayNr4W zr_}Koh~Xd^uMwuEJ-~%igN?WRJ+)Cm)P7EqQZAik+1r2T;+^me(5k6HGd1$13%>RZ 
zg!OKerq#E-qXQvaBqsu6rM8#+s3dpJ`cR7UI#0l zGwifaMDMcbBV1j#wifF;lI*_JNP7B-GsWWxfBiPf)60wM{40fUy7czQk--ZLc4_wI zlWK+z!kU&klPoaRlNOro?@Wr^eAKyKMiFeGp-GuE$xNNCO;TpnAW|EE{!6`-XEcNr z;ITK_%#?*#hS>Mevl_NwgLu)D?K0j6!D$AmZi9v+Gz z6+{rfsw>|beLg)6u-|HAsiY-q-w#J3zP19?vs?8<&GvZ1*3)KJv1EJK>*WagaZ^;g z!SRtxu8))x<8pZU$^BR(5tBWvd*QUl+>B8$4gaujijRT&UX%xq*|g^k#sosdh12$skLs z@f@SS{q`#_8_a_Anq<1o6X`52nMw^+*zjI5N;#4rU-LR<{*!U5)ZRqC9HmdHn$#TI zr2D$mx)>lGQc_Y?v!vJ&X|H-+nVMNr?r3H0Wk%ri+x-6N#^m>>&Uuu24Lw#{O8O5F4%2W`uYNfM!#I6#vxT2ntLFv75 z?733ptBurI-<-p@$eWBjQe^y;m4bJF@Ya!9NR)6;3OhzUs>9k4Hbo!9$qt$OA9ZmK z5reK~&~lT~Lkv-1gGqv(PIln~a3VbR#v;at|8DXk3V?rFQ$HrQu`+=%Ec{D0kUcj0 z29!|`nzpj++08`CF9#a7l|2-y2kmd2L&-sOxFv&loq4p)aOskrL^P|6Lmr1WzT{K# zX&dY5plJXjI?)ZzvX8pxfm=@Zvm{{=&r0P>{uB{KumS8$EJY5>Gn

    K#>RvRK$Cen9~wodaq z4r*&_CFW+sm;9Ftb2|>M_llQtFHEmmQt0gC3T7Cd6Q$`;UmzSK1!|b20j3mGY#|$N z$tWLNOWWU&&hMu{u`B!PVl}anUMvC4{WErg2`x%LhOR>8IYo~goec^X2U!#*Y!DNQ z;hM`mR@WTLoMv%1h%k92L$1nYe{OHA;5Mh8MHN5+^ zl zl9Q21w`@B-$~iJyrJR9Ut6w)h#uZd=Z#4Oi6!%J}%Lh~EXmN9y+_wI@e=$MA^^?e? zoy%&?7N3FR@s2G<_~2`A?p`lt&iC3hj*(cANa?MjPf1=gp(D%1sRvr!3P;mo!{S!H z4`y0JN2m=Q;ulA+OYh#4a$U&@ObLu0H|xVy{#-vNEIj{ah+Qi7=`}DxzpEiYA#1qu za46_zd2*A$%jVzQD5|OJoZz(o3MiRi|K0YVEmNZeQntsOS-CfREcRJM7OZr-OX%S1 z*6ilcB$097KzcCWQI+LtRk7NBAE_%YLuTj=<%u>35FjR?(9=KCQoPpmCw&g>@$;g5 zgC7EF)PeqCV&pnU#Z4Woy;>aBZ!nXanzQ7J)*j~@&vDJNzb3P{CNr7Q`*LX_0dx*z ziiu?ps7vZgNlgZ(jMt5L4GLH|f3z~2E29Mm;msbA`#mBlf>sY}1SvBPX zf*#wTWMLUV=y7P+ShvgI@!#n~E}dZ!ZUkoENp-#3^0 zXf?f>gzn}((EC}^yyA2m8c0GgG_6zD3IrY-=o?84A;aE8)vf)GHag8+3Mvw9?xY$P z|5eKZ<`*ECAHhAVGZGYCHiNuo`EryAG7fZSeT-5%6DRok8zcQ{O^ zklAKA-by`H+MXDh)lJO)l?644E*`KxQj9z`?Vx!%*XcR^tM$5!yBJ^9hHYTjxeKfI z^rgKHfyn{xWe2hr%sl5vVAqrhvlaLnH+l!I3-34tfgL@?W$Bjr`!MUhWo7?0uoB^V z7_Xo8U+4SnSwIRO2#bF5(i}vhkBIagq4|yDT<2X%{)1ONC$swWjEs352mH0$s4z*l zf)nIOvbMM{@F#6Odo%4Gwa6CNkw@celI))I-g{HIJcSka`e8gpNZe!ZJxQ$cc2^iH z<-_fJc@r)2Su6#!=iNOX^Aem@fdV!T$EZ%f&4u{QzHIKI05G5M_&Uo$>Ivy!Y7Khy zzZ{!}AUOJo_Ee_(dyTfUXYB&+W)SDG+m2E>@jgCY_f^MMEG`G89_n~|drKUk!xwkQ z9aWC(0KyMU-M5}z$W&b*4e-`A&S`ACpS*X_%jRi$&Btj++jDo+>bhhhIH6Gtk{m@_ zpPf~U2C6cGn3~j-K9N8+dVEG`f6X#MA^>8I)*-7nOPkMndb{lYwyp=@^BS08knicm zTigk2_&xmaUkUof#`NcY;#cTnf3Jg`V7q&R54PrEgC|o)ARf@@nq)ZoZZUvas|7WG zly)2A!Y`Ygp!P@2DX=EwwuSNgm58e#sds&7s^cjfRQK6rmD=ip1*XLRB|x^*q*9kU z@ye3EbbYNIctc71Q+a0C7$6Gkgo%H4?tF7b6aop^=vd)#H7IAwatOn#8oLzMXfg=a z9!v+?ku14A!jj?eUxZmh<_xJA-*|YPjoeGyiWqo?*;fSf!=o-zaA)Jwt+&0LK5Nx< zjEsXhSxM;AlfBAklp+Iv4ah{T#B<%nH%L*HlybODy-r~|-Vcanb2G*`)cm@@yNH9Y z)I2&|iTSSYVUOXCUJyX zho9-);v_=TAVhfh-%5n+@zZbQ4X5Vobu-I%F7Jh#^i~!}X5!QgB^H@(y!wI;IcK36 z^Y*TaA?fSaCRF#EUj@A;esrVx)oriKVplq@oPX9fRT5REQH2pnA6<~{T@SzELn49N zn&_?U-LEkztVwV+>6H(WYDe@?K{i;G9)>!BPfOT>FV~5{sB0E4AGzE%ebJ zHpr@+)HbO%HaV#0&nYe*Ksco8D`r=?r44jF>0j87SKB(-2*BoKWHci(Rg$65s?{>X 
zq&aQXan-=hyH+p0*Sv3+9m=_VvC&S~2m(ngzj+m302BeXmbfX@+Iu043l&7x^gA3= z+CqC$L?E1gjseQ_v(@J#OtTyq1^$nMLe7o~^ZhGK$!nuG;W-ksPH?B?)6vL`hSk;s zN$g4HaDmaXP;r*A_YRfONoVJTKQ&d(sg=4@Vmxt>EorvRm_yIz($etau%Uxn3{9-0 zL%osdqNO#*ZQe&iQ`M5~Ut>s69P$jO_jW3bc_%g?vLVt~t#5$(u~^ZWcd$+JA{4P; z4J324aO|cLh=>r`#>9nc$d5)AS8`gy?|%jdq}Ft>MZF$5RMf(NegJ_~wbKrm zU>;(nAnNmYzQ1P${$Y{)><8v~cMW~ATFa)L;pOuntGfTTopHBQ0M8@+pLYs#a^SHi z+pRsDA1^g&V5>JIzYI%s3JDK=ihuFBxaLXU;RbP%MQLd%uFTNUDTb!pYrV2nUuVd; zTB7r7jOx-mkMXP4>Y}QIkDQHHVTUePpNFKS8CiwI{{a63yZajX1#O09hp1bEuaCg< zokd5&t3io|u)(q|>|dWgiQ1Z>a|$-XMus^h>>Xl{b(V+oT@vVqKEA!|x}_>A(Y+$M z;F0xWG%s7-QG2Z@xVXmmWM+Q3cO39c)jQavF57_e~#IcSynC) zwunWB*1RWXn;qB&LQ)uumJAiF{eN721z40@v^Jq)f*3T2FsQU3-6#k{>QD|TEeu^F z4Mzo}9XbY32ap(g=u&YI5Ky`YNoi0z|2^Qn_niNG9?#=JVAfvou6M1y_WpcmiDEo* zn%Utad9KYirvT*p-)Fm-h<9IW6^fA6N_+Y_j-k{QVsR1lIiWd{5^V-J+c<~HLkVk7 zzJpCsy$_k0gK*<%7RfxfCXbHaCGf&IZzx`@t!8p_IMvefQ?5>4nbXe{hfi0|#c3vu z-@0HRT*cm1(RlmYa$D#f@RrXvor34aNAN zA%O@Tf1kh7RkHSEcS%c1wD@U^c-?Rh>8_PE+MsZEa}#TK%dGdSE2!IK$W9VYm-u93 zT%J3G)U<}T#q;%E;FntA;nzyrUYYDDX+;iwgwvGU!OAX`Pbjc0&gNbK9}+NP89oRH zDUC2lLNYjg%KY2lUy_9%8ZBj^DPC;yrL(kp&##HrjI3wNn--s_R~-)Fe-y z&+q5f@PU$Z)Q%6-E~~j~6BjNQDTu!qvrYw#tFHlX&7x0WF!$IxiAKMB_*i9$FGwm@ z)o&XhQvc@(Fgb1zgmdx28t%+CLx68-wXDA7auvJfzQ9}Y0gG*i8#B!>^e;Ff3DZ1< zcAx0tBAI4*_!;9aGUrb$osn#-@&D-CQ6f0^YBt?9UPr_swv@*XPSDOE6@fLWDd`YD zX^Pcc?G>hmqiQb;Ty#?r$}wscKDSUq+?ffe^}t+PvTh8Ja!D2d!<7H>`WweyZ*}@( zrCU_;qoMj3;9WX5Dx*rlu6r+S6+OgTvz&uL;=r%)^4A8_7u*u{sIXc zquX`!Kc)*N5~pa>kH&LpOjhJ^pz$cSA>rg`4cC<2z{KT-@3{4TUj0cB3z!nP>QFEcWZqop@8KOCZ=v z01lFo0&dDlojw7Dee>{~IJd+V7Pq>k zG>m$e@ht!S@#L1vOM#hZP}$W{up!8@TP0u>x9i(~<@+nn z34fq>H7tym3*6M$+7E%SJ2-Qt3(CJX*3l@*$9ath?x8I#TLER{Mqp#|GxD|LYL=!T z+>CLWEVDcd{zu+J@OY{f9-|qaJc}BAqHpUne$eAO_+y%JFoGkxMf4K5+(fy$g9rd! 
zkGSR@nyXcsMJez@ZTO1?CTI#1*2%It-g)}9Z((P;-0v1MzpnMyg(%FwEu(JjER$N~Q~_uD-)Hs3u@ecH zM$@G&4=UUNw{h9Mh{_%Cq_BoZ9nQ)Uqv)r<>=aL)O9dK)dQ6wVuwEzsnsp;tNCxxM6Y^&$M{rKDBo{k)*6) zOQ!#mWln?%_%<0h3lQI&>~y;F)*>r++?}ppl;v-IvRgVzPB2e!ZmpRG1hjyAXlcG3 zOoD5P7a!$$u+g6iWN}ym7E<&@q*|IT;BUPjO?x@bLC$nMEE8Q<&pUUQs0pVjTXVG= ze|5rVu9k#}@!29N8Tk)NTd)u*0jn$7Qq1xXYHA8(k^AdVm$*oVpOC!FOmXYxDjGf? zEn<>GS$gRK*Z8PWQUaa(YT8jk!)N%5r!4`Ge`pPVoqHa*O!Z@QpjzVL&dX2Ce_h5t zTmT$fE?5Bv$Hasbx~`wZ^jDi^<@qH*q^33Q;a9i+?>KH(;Qa~yC4+G1yQ-?dYsjf-IV4^YZush-ihqo@jkNBp!Z0+9#HiH*ru0F!h{ zuy=C9gJ=C`Uww45wj`?D;=1`aWvHm~M8_Qxxq9&yI@j8hm~!bOx-QdQg|3SynmRF% z1L;=cx$?XCSZK96>;{prg|s+^3y}K&&;j9P z5H2tb1w}3utI~gRgq+p!xRv0${QM6RtvjaBha(f(g0?S9^F_y=e+E%oSicE;1QvXw zAE@MO`{dsI{Cv;g;2fl_!a{KF)}4X%If5c=(9*pQp`k zQIy5um0z&Wzmwb+M=Z?u3=Pdg+OjQG6KsPywb*8_U0N(;ee)<$Mf}-xdQO2$eDC+~ z<2-E@B_)El#ZA3yfPX~25n`i%_IOv-!5iw;Xms3MlA$vPN%~Gs#R{Js%K)p*D@?We zVgAuqaK_sT%8QbK2rH}D@4F&GA$9(<+B^7JZ)on=wl+sj14bTo#%|wr951_9xgACR zTMdgzS500y$@Tf7L^PuC?V`Kap}&`P0^TR9nmFN-v6@-lPDSo;@)RwhjKY{yojg79 zmNomG-$-ruW?Q1X0I_BUdb!ACBl77yuaUsX+n)ku$Tu zoj8S}Zc(n_uR-@T$T*uDskiDJS1U>2dPB9Ys(Nn{_$}@JN5iTpxN2K`D zs3zdb9~&L<^82mZ;}c=fNe=a)Z6Td52hi11cwHE zy4OtihZ(^utM`9SH6?ybwiY!OE8bQrXHL(h6-J1Zys&o?29q(DE?w#<@yX50bBzZW zEZO#(_m0cf(gSP3IiPzP-(9h`l8v3UqjW<-!HK1?b8X(L`YTz)ycD1Y2x;`XHK-^$ zVJPE-bqppOBF#M)XUL3D4aSulrO-dWhrgImwt&yBgDi2jj5>;ulr$@VQ>@irPt*># zNn0V4qh!1VZfD!t%eJwXG~m?e4ox$JB@cGpWwO=%>BY2_4fBmwsFmO z55wb;roZ^O;%+opcr-T3ca+%qzAxo*>ny^=p3T%wHgEvRn3K46k^n>ZI7}c2H~Ky< zaDCcQ{Q3usbv{nrn9-6Gu_Op>gN)vU;u8g-HL62;U2f2BNZigmg_uX6z|2F$lEGgk zv)~h#dN-u+2CTRL=d9zHIdMa!spF0S>u7167A&XBuwT9c!qtkl#Fdu;{K&~^88z3t zI=%DfNyl9`(b{-rI%~MhC59=7CSG_&2b`ul`$=HXfZPgAruZYem+8^#sogN5UCpe+ zck(ewz(?q#W7iWgdl2al1wYC)8sc1HaOLV+ zeWUbF;?J+v6G@8axR|*b4i`^YB(n08XR7!m5qC03PW}82fTrkTbsa^GLiYE!uMdI?GMiXAW&f#g25cmg;7zSp(nq7o|Fe6=>l2GT{sk% zFOrnW`#~26YBO5so4$7zZc_tcA2Vxw`m=t`SAFjsoQEcqy(xpyrP~s?#z_XTcr`b_ zdq^YacJW`gQ+q}dx!>i6M_0R~C0MW97*dRWYTkYi17}X!p~9e+$t@@VG@^T;*v3)Y 
zPi?k_JJs^-UJwWS0khaI%wfA?K{auiA)}(;6?HYI#=l+x#}tz7u;{1QgCN`{C&K<$ zmEOnFKE7d#@Aj$Hhe)qZSKbUsFfovOqNI{fM4};8QXybf_8+w!C$$hGx@Ruc)_d?X z_z9X@F@W)gg#pre4Z`G?l-io!+@3WwbiEuY){;Bd-3*P?w6r`#JOQ=c$t-{+gNR*a zO#W1^a0#W-@9)0Dij2-$Y(l=w$BghYxSCu8RX+{pMc)e?mNz}PZRURhZWu(w9@FWAloG}W-C1FCU!cz~YT7IL0|a4=mu#5LfWpp;$? za$Jw#FR@=(-;B#+foA36@8PdOcS|vBojL(-(p=yUf}4FqrTMrhIO5N&D8)6B;c1h; zp-GO2=yhLco^(H%NcR1%FDE2rb^a7|8vNDMDpDY*+5W^KiU0Ks^}h&cSFuEao+_)2 zT=nc;7Fn=;k$8>>lHDOLP6yyB7dN+Se4MR4_|GMjyTikgGgK@S5Nj=AVP7Gu#V6`% zwJ#n3Btf9;l5kn(DlBd6%~eCHj$Vk&w^7vJ)HPNBVwruHYeMCh;AWtzsv34{Ilu( zF~^7|sHl9JxHG$_ucuzbQSSVrG=F{`lp!)YI#rBEDmVaKt!L0HSlCWv4BM=>j!q6z zJ%V9=p}XS_H+OgV3(CI0dz4*0IW@VO=`uIG)8n}H=I650dk!wp{F055%J1mtxPz0m zNXXFwFj)UPsFY`Y`nPe$5z? zzNuM%_~x8Gw4cG866FSZ?)&9rGEO<$K18N_K2pFc6{ESkOgfcQg<@GhZiEn*44~fo zICnhZxbo%tTpI8BOk>mW5=xQJA|lY7*jQzSj`{w=^cN2gHvQ$#PcHK zaR*Vz@Z-6 z_s}3j4RU43z2Au4(k^!+vmZ?~AB3HWQs^`Mm|JMjn}b&;8WGoEKilWKD1cxqz)?ML5G%}HM_`Z2N5^m`yXT)-NR7(KurcUw-@ z2*0@t1Fg_TUC8|nl`T7WVuVCs9oN=ok^KjvzF|Z*5H!o-tI|c;H?%O-s^`-9a1X&hTe5SQFPRE)swf?zN?E8 za_hDRMVG=4vNWT5OtpAB8{QR_cEee`_$ES9u9Uy)dVHPunUQqnc;(&56_!TbTCTY# zh^3CcA<2B)zRCrkM*Y1q+$IS$_ygh>)v9SKX6GdOzb3wbOgB>{Cu^8y)7hay8HXv2WU%<1<*-uJ!9`!$>^L?G8sAQM|dA4 z^H8>WyP1oN&%W_fo+d8{)G;zwOof#gf3#cOx@&nZ5?Z z&Ud3UC|~AOpqZcrm{P25MAesShOO&%rL_=8Ecv25-nfNgEYlV;?)tXTX-%9o3BnUW ztNszm<~_XK%D=lh6~`?mp!+h~x?HWmwp#-VQ?hw%4v|KYm=?zLiqFP*Apb4%T2bz46^1~cW(?cK=W)bh|SG_FfE$?{jLF%X_zSKIHds$Q~wzhK8mZ{(0ZZ&9&a0eqe}CbVwJU`1(H8 zNOtj;_MaHgFnF+gX7CPs;77`iP~!SRn&`SUyD|Y~OHqdI)?teKhSK#Bl0LNe9ZLGj zpCI_7h2UMXzFYG0;siP^knX|H1Uy0WRtH^b8gJ%@c)tl?L0X}F=ZjEu0Kqr_ko%x*;=SS_Vr0r^~Hy$ z-<7}pP3E!anA3p>LG+FCuMh7>4}Ql7XbM|*IZj%?PL}woP#_bj6W*Ya+qh1vWgD#E}E*;WNWGrNYur5E; z;Znu_p7IV2JQ4*tx^}v{VCaok{PXpDH@Z#} zcEJf1^G;sMN@y1_`Nqz@hKz=zKgLGCC(~`eXUM-PkhS?er93OzyEy~s;UH$m1{o!p zR)2J8;A60o%z$?Lozqc(6Heb*q&GN_@+Q>psYmULm*)ieOHc7!_|Y?Twn98Gs|rCh z2&)l3@qnX!W0X9)Ot;ILL*2abl5=t27^h7MCeIfsFE7?lr*+cVCD|f&Gn40QC>}l4 
z*!p)%Uh*V4y$ZDLmNuX!q6TnSZBs-_3=yJpO9bnhUDQ*Bpve6V2efA&tqvSM_`dYD z!hIzsaV`J7J!y&EMDybiTRMVZ0U<{Tqz%!2EWMahMwTYwcgDz`hO~QR_kzgSXK;@aodrAd4e2~2W7~V811jMY*`N)ZU~TKS zp;z;9{$+hGRJYA6vDk{fjs&+5OlJKaYvG1gzZmH1kOb`C1I2y@NxL9^eKB|veC6+B z!Fyb4=9Aexs+Sodw3aMmyAVTeB3aNPX$t;qDVw&e4on1$?Q zkq_o;g16y=ALiaRr&=;QPpMs}rS<>io|&1MENH1@-Ty0c6iG18a|G86ce;7iZB!h6 zwqbcUXHCw*HnM!}bQa!-kthjx9|zbjc)P-K=gC`48;5#vAK$9cnOAN4!xmU+qb^Pq z13(qy+&H1m6MC%yuEov(E&agcg@2K2647JicUIk_IA{gH&oU4+lWpP@k0aD~*BsQp zZw~*V>F1RbF3?eMRIjC(@*ZBQ@oRH$McCfay5GTbP571RAs@8}@AD?OZiC-}+j#g3 znf2;7>=k-Hl!8Vl`U*{CHH`k%LP(|q3E*4%Q zuf05|3E2ROK5dhPIl$sfO8OkYdZIyB?Gj1`+;UDZicY9E4d*Yt>ch?WlM*Y*x_ZaP;PrwI*9$I_4&U`!g-iE{ZZ5ExO9NAxlbfVa3g0A4-S$S>oNjkjU*(amf=aa=7b2+kD?3S2z}AtD_UV|RDvTC zltC|{Q(8~{ke=^-bG6zFuTs-??`C8_2+uXd_qG*4EZ_+@B7@%$|;pj!l%b?A&5CBg;`bXc%9+ z6=PdFd(+xe%VT8i!HuI5y5NKItlHWM^THH?N!F$EJr<3^(#>Q8^vuVILy^`w*0F|f z#FhAj3jltE10E)}fwF=lG%Nb54Js*^N{#As3qE442)@yJ^%Ez~bXy}%8_X*_8rKlT zE9}y%5`X^WIXrGDn_Ueu#S{vw+}Bh8Zlr$XHv;dYR${5@t)RVr-R3Y`c5>0nz!lIP zA&fyS&O*JAk(P|7@_5WmuPP##3rnmz}WG(b3R8J+sYhgfPy zfeRzPB59{0&=CTAd&e7+tR=)v!S8pV-T&sMmoFXn=tVNA8~W~?+u?{w>RVs+M_WeF z3BYO=1>M6O35X$~ODs}NgB#`3TBVVM`Qa#u`DfPswtn;CgEKVNo`bm{-s24G`ru<4 zl1uq7sKm8VWggg?k+dguRvW>_GFu~mv<(av)*0OWD~(OLriHFLtg9+kj-I27B(6aa zOf#rw$;>Pqd8rH$GGr;Mm{{RzIR}@J$}uuktJk7M+14w|LRLrkL{Wa{7Ce9v<^z+ zjBYu3VDWo5+~ecg?;~)B7$zIJXT^5+;h_*13Z^v%?qo&v(|jX9)1}o*@l_Wq=zTvB1I1 zH^%^ioR(4)AO3eQdx$uUFiw7(&YbJt=7^R#&MS8Ruqy8%A{4ADQi|T#4`s|Wr@6!< zS3d?nYYYfX@f`-ms6>qh@*<0r!i(UGgNwr&GCSJY{kF9$reJKsZq&U(vON!r?M-NK zDM(Hc{k73=;+kB?+bO-ZNYP2fT#feI{v3ujF>hZfOc?#XC`IeAu73J)BrWLNCLKHm zi(w@X7x)bE?L|^ud#MLmvclF4BdHrtBkY4QRjB&es`D-;taC>vl79!UWq^S@FqcWk z$?G7@v;Sh>0qZ9r5~GsY5M*vhBOldsW-e5lN$FlrRFS_>N@MaRs$(gXB^tdw~)Te zdfM8*5*n)S_ttq!wrkoy(;ZzYa8Ev0v77l0Eqb$oo_ON+YjhY#957BTG7lr2kGtSY zOKMos7t}X&fulX)+S=IVYn58WNs;5K?YsFsjtx-wH3t883u*TRxCA(3Hv1XED|fn` zw#Z(gq1K+Ino`h%-HpBP3%gqj*$QlbOhWz2f+1+HAtoJdZHbzP2cydmS6viGZxpzu 
zd9HED_Qh!K>$PBNI=^6TY;C)UGl&xtW=5M~ZNh&0&u9Ce{26Yz11F42(Jsfn3glvw zaue77wDg^*uu7k_olm{ijs|(|vB3!$Sri}^^o)%Q?#cM~1sm(Z^yhgR zZUg+4S$9!azX2?k){W>P93rzlt1{LOk3K9-i>6xj^{oFu^6%FlZWMY0o3vaKHO$`J z6gil(mhCt0-t5EFoG~KK?Hz9OK7x1a z5!(tJ;g}$#da>%Jul31fQ^sDnzzYuMi5iN4sB;kX{C~XbByU~l7|Ofa&yx4+@F^R2W#qqSAVdWTn2wh(w!>hv^9r(wJ9&$l1u z_-E_Cehd|vzWB4Kjy0@;^_zHO{dV%Wh0O8tYlf4}dS68P`MPugrhEjlA3AX5aS71K(2%DW<@3AnySkVr?Q*Jqt-Oq#RtFG$_ zkP%l8;Ed()CfL($x^=&jd{o(Dq#41~<-ka62j$ z&)5mVj*0*dOkyC^M^~Eg@20EQNjI30@zh*YWjAnqMn+z52wQ82iYa(S$%OPN* zP)jv|$2pn697e;3f&g~xyFx+RLO01XEFxrkz5>h9_1$<9hghlT7L}?Ci8kxLNxBcJ4Lvo#ev-qhuK~Cd? zLwSy{fuR|pKL`^^lg~=w(Gcl_hU`=;$F`D8Rh(73Efk#5+47uyZv^g^&x1wE8dqhl zeQYMeQ?Z4fu8uhK{R^KT@z-y^(~z0g-dJKJOkP;<^RLrS^X4q~sni4F+p$Kt9&cke z0@qeV@%N3St4f8|#vhH{a5X@mf=NBh6e zFZz>&aYaPg+nIB&H9_f3S z&tG{-^Onw5&BBP)%!e($$|t7QQb$yN{&a%#i3SD+c@mNmWs{MT)gN$*%#IE3uhW{a zEC`Pqt8*QY!l3VsHi#mu-b|-4ar|7@0G0TrKRi)$Hd;L&*RQ(!ZiVHmc(^?ATi|w+ zcY5r==9!%q4r6|@7~znQ7rb&Q^*sYo29cp{lI{z>TXJJn@?D^V zK}k>yu(QA(U@p#g$5SF>64C!JCSjdS>*w^Rntc!hv_=ug?#&o_Z(Q0K7`~xMn5UxeugY@o#bmsB` zBi^U)7j>%0oe<~te&xzn&h*XWI=qpl{f`leTm|#9aQgp%=JH8H@AgIZr6P{&)wchf z)iv_U{8a8STXj3#CHIzE;s@(=PrT=576T)r`-YBx&y9j={b5JhR?oxglx5g3=ux=3 z!gJqiwbgH{gZxL(seJ~B2llWx-KSqJIhUO;Bn$LsMumtN`81p}!L1&+1NX&B(oqZS zC6I5|D?vp{f1ANP^dwEKn&K0GqA0U_i2o~X7VkxNY!Qy4Ael8Za!c2? 
zJ+8kAB|W=drC;HP2DoR|l7KNe_)S~An2>5o5;>$DY`=qVX>9%$*Q9AmT4nj4CTVrl@x~aVxf)liC%6f=dA_X>=F*$|q{RB0 zn6kS&qUXntpHh1r4P$TlQ+>-HMvjbmmP4?I8@=;S>e=EKeTsHP{U1-(9c>!1n}f~W zF_~$eLHsR(&Sm-gFrAmw6_4aIO9jE_%aMW^NL2{}l4>f#pecYcng+3C3*K{NyEb}S z{O7fk(b0W>;#}Yy=*;~pUIVcApP>7j`bTOD#JWNZD8Q9*pBt;B)_{LJf-w37j%eEDH*O3-rMvk!eyBt@ko~2(2xT!*pI<5W zlo>!TDWL4XRBP)vtS0)BmOpv`eX1Owk6_&oqMGAP06hVd61ej6hpjdaV;M6uR=#H0 z=xe9ee6Q4bk5R?iy5EyX=v)l4s`G87jmtNftx@qI0>C#t+|RY69S1U{+)royk*7=@ zPlWtb=Da`c1K1SpA-hdHuKY7#Q~huRc=xF{_~J$`r2S;mrIBK0j}Kfa58Sr(#BG82 z;3={{MzKn)`qop#9EoyEeNM%PZp(xxn7T7D;sZ7%K9lpKvTCNn`nO znwuzR8-39p*v8=W)_mtYZ^Jk$+^~)A&&NNel{@kc+FTnShrA6hX^2#0s z7n|HBlu2!&fJ=s*HyHNObH26ss|U4X^7z1LYqx-`nTy>%B2KLoJ?_FxvGRbECxiqZ7}2i6g(M|96F#q&Wifac;SG^aXuRv1M0kBl4FtS zE;^R8D|wjo199f#4DG>{*G)78ny|Y)C8Z6S7Sd5i>disTXNV5)`s<8d{ z;oIq%#Li~Td9z3K54XKJ&Yr10Zf#z}#6TiIB#UDo8<9KM5ctnMZOg}jI}3-AR2Dcv zXz0tv-t_6dR(%rsvM0WV?yS#zqS?+vK;jyqEU zWJGG%l&jNagfiYVIz=z=pQ7C0q+_LXIVi7Ndibjz7aVK<;|j0zq*RMOLC38BaK?=H zV6p1_=R0j01KN8iWZAD(tsbW>-}*eaD4hOA40qY~b|+_*N8O4@2;7s-M$s)G4-jU6 zg1bR0WM6Ic^b!7`we;vtJ`Ta*I|OWqwT1p|cU)s5j*~JGq%Y@zov&u)-S8E*uOYAT ziKG99!y-#H&=wAO@~W$q?S1?`P#PWI3*go%a*OU;t49QtR4%gK@HtGD$cOW4q=}S0yc0r6ysj6 z%F50B^SA}}C<8!Wz!U77vZpZ#j%zxn=!J*rwEjaX+RYh2r?ZJmE%Rs`iw70ud9yN* z()wD%{C?I1X_dcy9HuGuz`l_Gw^1gm8;?Q3m(tGfsO|<#N8DQA~3HZF1<{5Eq~56MMe}8(weRFhGA$G_hZ&2ZpGwaUqr%peA6O0&bHiU`27_ zk@wBQ=XmFy5Y29ok84pkuK#Fo)dFS$z4srpdme-Klhc9oV0)rdk4xOLcUo_*?gHPN zPa`)7s(sjQgXV8~fH-Uyz?>MCJs%aWxP}vfo&JXF1<4qF|4Fo$@6cm?kB*9-Dl?sj zg;9%-8I}r*{UU9Nc>8MV2%@O3_3-0vC~or{kn9)#@#Z}obVP|A;Q%sM6b4M%T+Uk% z@%^a3hr)K<%~!{zzVbNwn9ZGyb2%dtJTC{Z z4U_5fPs$D1kM>osUs{~4(TEa~J(T55I26-Q0iGLnv z^~)V`&Ak#g{ibBt*zZ9Em2_561EDZ}*4U+oZs#3jVkaY~2MPvYNiyVobYYnQvcy$; zHmb;3Ri7XTck%oBT?t^#JW#eAmvsC4XCsO3g2aPFM#J&gHn04SCsP(A2^M5E_b`h zB2Jwl4`up5r)2bLQu3H!9arW%HqIpNT&xk;<5B@XviU`|xA=LEzOj!w7K|=f_`X;< zf>;z!e2|@aqIn~@9_;WlyWX_(%5uT^|~a_>*~VlG%rk_z=cpMS)x&EdG{CEcS?4&Qolvh&`ZXiY#IE)_$uX#3HI6GY 
zm!Rlpa6hQOIhER>Uaz)Ty}bJImt9A5MHyJ^XKp}mTf@|sUxk^yER)quaXLGY|y=$ zTT|V!RkWPD@C4cGUZvIZem7gJpcLZ)YQ-n;kW;rhyvsbbSgpb2@y zDTgx-=;tG;u?zNC-(}`{>*FYOYMTN%s8%8?gxyW+@J~v}9N{7cv9_h1IBE z#7VOZyuydHcI?~8SyE)k8W=8|j-al_dcIt6VVj^FkAL0Vd7Gu;`*Z&9fuc~9h`fRw zITOj^gGqNJlA$4Lp^WgrL3ZtV%ZT21u#VeGa4B{To+Ek}BYhSF=5e3@nC?Y=0C z`d0M_scp^`v7I;dteNWzA(gTCS6TD+7uYAzoSDTVH&GC1`(^6kZaVcrs(}Rj^WlH; zAJ=7*ElRCD|8j8XW29hs$&l3WjN!Tc5k@Ixl$VaesR#m8CIt_&@VRfTgFR+{DhVG9 z6lys$*I(5Ftw8JAsDW4YxSXC=zgxwTIvrQiBHU`zJ7-@gDEQM6+r?YO4w0^o?W(ACGNf8% z8qWj&KTS(5xt3isa6`_#$etAu%hGz0;YfPPzd<%nsVPE;Z>H95!r4g0M(RO2WX_Mx zjC7T@O0owgtIRoCe9olenmzje4XQ6qS6FZH7F!`?>7&tHzO9MR@x+W>%uK$6;arXy2to ziH}NY#7o^bA0K+8j~_j9<#LUD&hns8G89g+)L-%^jmMI);c|4KT-T_qEkenNvBa!i0@aeSY{cQW*S5t!7k^&d=}T!rM_R?9eDIK zcMGc}`s5|M0|?~{YT;Q-WK2?P1GKC6%F*_!45G819Zd}y)Xu^YTSpxf9RRdqxHW(m zQBzBj6=n#Gc-+;hQBVd(9)cYEejj%mK}&79)Rde1(R<9ib(@z)(v>Zr1-s>)zQJ|J zExMWd#aDJawJ(xzM>5|pN?3o^9`&Z2k5{gMzJlkIvES&HBW~?NfeqU z1NBIix+{Og5>2Ih0s(on8{S+owWct5BbqcyRXq}>M8CJRQn}E?@7n*`{Y9%jBj-!tyl|k4k5p#^ zJyhD1t70$*6Z)A9=)H@5FK>i&o@+&&^FR>f73dDh>gN&a`LmJ_n?yJ>C0&D+ZPP;T z1W4N$M(_BHCy5P-j9zbKxf(V3EWFz2!K7Ko5jmdWu7RdfA{D5kT7Z#=yKz@s5g`!WPi7LRcdwb{0TT>`( zSfNRZVdV{z>LiXB`>pts*jG)Ua-d@_h*SH`r-FRgGKms>n#|Xg>^_40$U*$$zd5Uz zdEav8V(1~)wbL8!3ps^^=o}cVFr_tQ__=`beVh$1_;oP;*CE!VHPg$F`YSJPyMZ(j z69bdrXX7KI>==C)xvDl^HbRZcy2Vz0%2;mlpKr9JpFP8Rex4^Hqhw%lNF~WO-|A>= zSG)KmBuA-M1{t95&|>9xFefyo`G+BJY)k(*Z0Qvg;_yzZH-d)K5-XIG+RqpZuH9ju zfQ?HwR=`wb+x2fj=b_ZpLVhMY662Y2{$l_#AUPjRzJqIoJgUeB90e)H=q+QkdLmlA z&}CNk%NL>D3clx?b-5^`RKPN##xDUU6uz>W6$@)+X0uU|dCl%%D@La@f+@I0E!kB` zGI5sSMY!LXT1+pMl;ec+eT9*WspotcD_Jvz((Y-Xs@t2KJ}EC#;(BZu3gc3EGRxT5 zI!;p})KL;&jj4-~Ds%HhKhD`wY`Qq^G=5zOUtRvq07I!mLZKmbD9+~aZms66mX=Dt zC2tJGy)NWA)9NjTN&BewR3cfC4!My_%;}F^sJ~1-99dqKq=CXxdb5i@>(Uoi${dH} z%aBBJrX+B-YO>on;LZj(L?TL3Gfnx}BXS371Yz?aHx^$T;C7Al>W>MiP?yqGY!VH0 zB$>s;idgZ+V3t>D8y^v2$R@@|-7tvVq5BKl_!59|D9kaL&Owq)EEFFwoL{Rrf4_!g zg?x+_1aWp3om&m3V|)+utuy;3sWlI@T+CJG@FUctJZ$I3s~Vk!#;77{73+A}YH3uu 
z+nJ75>Maki5^CawE{VSe1{!)lp4-B*Yv4~A55d8cKNU~L>7P4LN4x=H>-F{i(}&Av z-Cq&Nj$$S6clBh(wuWr$jW@Vf+^MN!H`7(Iu?M-9ikHpf^uqwXAC;9Fw%MMI3%6Jh z>=PHquZ^owj6&v2d0Uw?;teEPChO0I|D>SJdhsS?dk+8wk%-M-0uB=dqaov`Aew@u zWUp!RGqv-%2m*5&3Z{=1Rh@rU`fOxdVRYd^BZaGy+xyo7gZ-rbI@3F;e)NakX zcc36_W_Q?RNcPW&oue2b;-R9C>jM-7rwE@Fjgj^+CvD^I032z^nv30mG25e(_$@JP zW{%@IARAu?`30RUc^~!%6N?YfoLzpTG$4x%fP{o3QLiP3h(+N~Up^vRL|3SIJux6F zr05`ZhzjwK3Xv8Rg~D!&(fn#-ejBeJ4@+*frqXcGe=RZkj;OL6NyVnrkms%nhLnaE zYMBQlT;sD69Q&dD$-e z_P#`9A7UrnF#FxCm&bVgZzz^UepiYQ+>miBa9&(fLSXR-b?dTC6**>(hmA5kgr5va zWU&_kU&oA6%cLlzI60$i?JLO`Q zmcsHo!!0aF2qmC?`;y|sAw);XbysF0Nk=W;W;+w`W`ueEe7Y*9Q36rZnX&QY*??wx z?14O*l@gN3NbMLF!WeLa+>xaGYbuBE=laIijcDp(A-1$QNoCX!s4dP&aUg^B=dC$2 z^Rj=R;>;8l3L5Fz+YNYi9KhCLM04$iREFaBUN%oChe~vltTK$FUNed_F;n|iy$1wy zBVesul(hF7F-3I6icj1En*Ff7X%XcH_<0W>V<;!H%oye(5mggv&n5y%nH_~!0|B<{ z&zDw_$BteFeujK0#H`Sy#Xh0hxYX!RqS=uO!KuhPU3BtO7`aUCf0f$RRS60Ut2;({ z=a{tFsaw)f5`+_wYUVQH5pxo&MzYUV(s?bDA?R?&edzG?wF1dPmQ^-Nr#Xw=dRP-VWvwW4hlx^am zkw2i}|ChC2V{_m-DdK!EkQXcZg}Ne6y%mHo%Im(uHwxp}ng`4)dX3qxO3Jojoimh= zJiMnVqrKD>zD0!~L7f>-wr8z@a!EN?OI^vwcF4~b8FE(kHfN4rEmLBO)Hr-}&jlp!# zUUEF;Kq_9x<$NV?mtZLGeYkuQ|cL!T80_|LSCZ z8@V<1I!uLQqdtc8=*ZX0wMpF;6*3^era|UFE<j7##R@3` znLbR%3+?R03-9doR6S&S2jB&Ks)@q7ViViA*Z4xCJp2WwFXeRYh2{}R&U?kCdjZ@5 z{3OE`9mKq#-*HS|M8foR`$-sYdGBL&h~~#ulqa_Sn08F=J+^;{IlzEPBX#p0XqQkl zO#hM(H!`@y1u*k>Ap_iO@uZ@pj-*yrAZ>q)3Ah?aP-xm?GcyK(JY+~+%)7cj!fc!4 zJl=ClcDAZW2VpxV zE1?j0u5Qi4Cb{LaIgE-;z?Zy*9y&#xVRJ?GL<9vykyI!)HK<=?gTgAF^MdQa`q42k z{U6_Mt8w-Qbf@g3&Oa+`C)PXqj%Kw>OG5ZznM+c9uBg~yo^k0$EcIGPNa7n1PFe3m zeuae~yDF)Bm6ut7-E&6wE$bmr+d|Ze#^!f}JmWS_qO_r$bz>tx@+pbmLBSLwDaX9> zF18EfxXYS@K$GvwAYYc8ds;C<4HE@CP^rnYai{TTk}Jko;KFl?)wYmrLj@@rWHiM7 zA0o+j2{Tx)%`X4t+3R`8#zr$mOkmhGfIL?N>;-Q_(N;36g^lXeYt1H#tQrKs7;w;iU)*k0= zq8GvpdUK9kxSEe@=R*J$z%8eI(ri7Q|IGL`aE6{bwh7M0Wsp^WbZBy-iC8b9>OyfR z&&ZWR8O_6%Mng?#UK7bdFU!t&z!aoz0kugt0&KLca?FbDE1!6+rQ$xmt}9g`(RG5D z;%x6iLf!|alir?qo>Eq|N#;jKZ4wo7dnWP?#`r3AC#eI;7^%Vx6R;hp1RyA%` 
zc>|4MG=2g#LR(D-EvRa9^c}5&AyBFkm-Dimw5KIW)4905Tvv4J{Jt-~@G#gmJK|zQ zor7Ct`S91;;w1G8aS$l)R#(#2b#_;g^{8)*DdK|2$??a35M#WLhTIAPFO2l?Q<+7 zzE7lTAfarVX`bl>W?No|jIvMIjoUph`knTnikglN{MM^-1lCGhoq`MSB8isBJX&6AdzcW1V@8dtcl~w~qLPc4*$q zJ*NfkY7>V9@k`r($$_dX&iqqU&Z^8U-*4QQb=_~!q?Q*JBI<8Qdq}{a>H`=KGSY!+ zHxe+TH=QB*xv^GtlE@Thnar!Sjv&*DXRC6a@0|UWA@-pm$B-U1dBsa<#Q&vmUT$oM zzDHoJ+9T2)f48c?A{!4GNcT!&5$V#=j0Fl(?N4`KFGTnYK8)2{Eo&_x-Sca8WXnl* z#)GK|0&jGVq>-dW8v~BCh#QMz8^1ff^^fd?T~Yj&@CqhvP6a5KtT~Fs6nJX<1tIns z{#LDP4%<%Me`Cx4h;%RJGI11I>lp%^r;1E6<~U>DG9+8ZJ74qaVy1a^9>q|M`w849 zX{grhBSJneYGyV{ieKqJU{&PXEq5=eMur@?Dv0G_NvSP}h_LfVJ$uLGerKx4T1CofNHCVg3Nctl6es=H z*6DYIS-o8;42moRcqLIG$`CpQHrrk(BP^Ajo|b?UCoQf4Y#?moEmsSlA%bx|!0NDj zCjd5|diK%R-i8Zi716MN8QVxF?dYw_R>B&1E)#5h97#MbCiqN4_|0zz+lFk)nbp}a z0M7x@nPoVIZ4uIKnkqKk{?n3^%^4>${IzcIS^=SF4Tmw`)F^?a8ujM^NLiPUv1)g{ z89?cqFO5aSiCwqN!P-V+^UXl`Od};8?43x))OUMyUjwv$SClJM1(YiZ}Ju7Bo4Sd z2DV|IElC|L8*IHwF1=@JvdS#F{djlujm+UVL9n1$9c+NjB{jqy|C>6R*rkX%z#gNh zR4**P9~`y=JOYy9BIJa|;n6ogfXM1c>hH59-N*32B+FR0?4V!O7`;wid&HaQa1cQXXNqPmC^sm?EhRvA~x650LB@ zZU8BZv&RfnAaJVjF$I*joB_7+kDD|)P!|3emjzqA_%=0oe}YlO5tmE9RJQ$tewgb2 ziHu<^(Emg{dWFbxqI8h6?pLwV&UE^*HaH>P{3hH*+PeQAm&$}?Vij3ou^f#7=syM7 z26u{peBz{W9Im`a8W-Zuu$s3d>gbMs2oqgbF$tal0Mq|}vjC)4 zbxfkZV#~uMpk4!Rb=lSauzP-}c5hS_tl}~e`-GZm=dkH9Sl}`lDS>}QfZMViCL`;C z=K)GZRC%L2d+_|=&P_6+Kb3g;OonEiyi;|61uf1E{hsEY;z!B?5s7>AR76)Mo z{!)~iJ`LO8mN%Bp=NGbSn_IWMx#^WcLu^4o*kHS1izV`GvlIOflu~xiT+`Hbfusj9 zNk#w0ctLH!-NY5tH;015B?Q#7@dZRbdU$8hFl9{^6jm!?Pm7MIE_mTqoO`Ra3_UEcP;Mo zqP{#|*CD!bXp_lhkwCh3H$8ndPFg&mZkxOhOcfCa5&#fXD1<>{M30I7HvQs{R%QP@ zzyn7FN^jeJ3c02vy;71v#Y(kPv~pldWBjX%{jZlt;UO#-A#W*kBhu{oZqKA8HOQiv zy$Bo>rD(ngOJM{a1-3YL^}v7YVYpcjg~C|x*PAS}E<9u&lS7Q`HZ+&ef;Ih%AIXK^ ztbWT)>%9xZ4^v|rD6R0;m&~%vAErT+_h5+JAZ?qn61B_|V%%wS`H8yH`+`D$axE8g z06{D3@E-uIY^1C?L0Cd}>mI4Exis(~)9xDJG}7gp_eYQTI{OdcP)fCz)R{86RAu`o z)-jBzF*!KpI;Y34H+C;o0oZ{hmC!G$EabHc`~ON|u-xCiWk5rEO+(r|&s|_4Ek#O7 zm&bD<&ftczNJfe;B#J|XCFBY+eElcjN 
z7G3_xpZ@-PDsaXTD2DFwon&3(#zs#S-M<=Clqd@on{n;+hO0GP>+}5-W2>(h$6zj@HJ@!Y&*%y_1!1sWmpvV-w!Z!I+p83n=EVqXDDQs-WC>9hOq$9 zc)+W5N_n0G=Wx)Cbmj;*z4Vm={68!NMB4pb9FySQ7l__*C3R(e-V-1SkA;S-B1+~HbZX9zeJL4N3O#oWYAy_uXRNM zpg8R$v`tA624=pwetFhe%+H=eFC687zcYk)g{5+?XBbv(c6~e!ks)6QM1NZDtQ3jzm6sjVbo8MWh(kkkN+KWLR6u0HQ8 zb>4L2?R}kvQ;a>3^aS&=gci8KbMud00gCJR3OH+4@gWbf`*9)Yh$wJNC>NnaCeOhY!l~Bw$o}*v4sF zdxDw@V-&veuMi_NE$DszWvq{^?#7@ZhtQ9Kid8}f{R$Nn(wIK|^@5sa^)s+(b9&Bq z48XdYs5?-f%Ku0#?6;ncrPsM{0>bibEM3;Bff9g1=h-XpT!z1~8{NHy0Pi;B0B-CP zoVn2S7R15YZGS)9W;)X5E10Lw1JkI|E}%t(8FDIxEF|RDT^ANph^z_|q}yjO*2Ce~ zkcsylUP1N%3ktZ5NDL@Q8vqSKc6~XSTYj?u`!Dp27K86FffQOsbkQc_*XGV!*8_*= zcgS?MCq6EVn!~V(--ygdYl|qtC@8;BNi*)YvHG z+$bhS?fz2tZ*Tb?*0i<0!_>~FCllYy2+}YJh`+E4yeqjv{}>>SH@w+$;wr)9m5YF^ z&4>#|oV467!9MDpWhmIB!3Q>5X+M;{z_bYhxYq|gJ5+jtb-5Wp9RZxyAxtE0@ITlg zZ-&;=OaN>^R!ZSmT5D9>iz%S-)%0u9WkMh=?SOw7 zTvEEbQAO@b&GF@lz5{M!M#l5RET>KrzanbrS0C56`L4U)e@9d})d2EL&N44*y1lqO zYRsjp+VrmL?}pzKBcs_ur5Cqo=G%ug3fgOuPrTLa&aw4ZBUNP2tmG{v0bMX$Y6OZ5 zsHqW6`ZpX4gbTxbGWQrQpRHi!NwxdQwr|bwAcXpFRNH7P07vE&;*;ME>r4C?T-;y+ z3-l#uu!pZ{rMcvlXlcrMSita4~Qc|x!#L**<=-)iqEeB3X-LRTGwX-Y) z8H>j52cr+w9-dNLDS&w(jqZ^K54xA`ewzWO*L|PIUX}SEC>}dfM!(3^>gNCGf6==9 zy#W+}(m(2C-j>h6D{Lhi74jOH365q|oGU%gt*)%Tbl>Z|Zjx`KtqK&iituY{X00om za-UJ*I(0FIHMxIb*?=~XN$=_U2Uzil32EqSeb?SV_9PTPY!q}$VT8q0+5Bu}UswaQ zg@yGw!538UPFP?ml(*9Ls+_!{BUzo*Z2vnD2y)uUSZ1I7+d!MiuP00Fv{=&@6iP{9 zH4>b#J`K_jkV8pM;ns4}Uoq$K6%4!r{DdF?IG|Q_pmfqMkhN0xzoHBGKVAXlyg*-C zDyYMHJs84m^cn6qkhLc^ub?ik4cI91Jo%x6Ff@PVhI4r)+CKz6DqC#&oqTFU5Go>}6Qbm2v;X5vrFkt)-T z=5M@ZfF8^Zd3d}3wpie}sl4aAD7+_;A=(%$5_f0?6CE%sx_Lg(f1Ttq;@K588GReo z@+L#6DO40pm8|BneX3RczCt6K%{h^+^*e4e(5)1`M8NUY8?H%Ls{4SZHq(SvQ7lKn zMp>1{nCxf7rO(fkHI?nk=*+2oO5!NnXyI1~WM3Klw0DoGr*Nl4eV#~1Y;u)t;e+W% z0YU*DMXQ?C#hv5>6Y2mAGJ!!5EvYR6gS#|o4brGe9&CgAq{6EphJ8uPgufYsuH8&| zteUJ(FIP?_{&N<$;3;$c1&H?mL9NX#t$okeFw_c1fc_b?_+oV#b^_su$5}^L6S1a0w1dF)A-8*5 z`_a#htUvJ^?b)S&C`SA9%346T5sYFtX7i>T|e2Hg?EZAULB_ 
zOv%B~{#E|Px8@9cpCGG1ol0ik^ae#D(96BB`MrC#&3**-;1KC-Jx;pO-PV-zybByn$TYWgiKd;EW~ct*QKyFKvu+j} z)-ODJ+6p_zW(3F6ItGYrWfJ@KzmH5?hZufT?c<+pt4PB*Xa60k_F#XKoY=^Jf(r5;Ew7p8{^G0YFxUyY>CF)Nh${> zf2YTqb&0@T^9?KG^e~gdglv7T!bE0;a3DPw9{mQgy~zSv_%f%HL4wcj}9q)H7I834hBqXSz~d-n=C93-py(N%ML zrO}`VF;IAzD>4u;j95d>M3??J>8a&zRI09jF_=yE4L8?6* zOfYmo84BE_+m%|cvGo6|47u5~^(p#b4;#ukTMWp4BEfeizI;064GF>W2!Y{&!oJDt zdWOF{A)?IWBFyA8*b?8WS?kQ+jBKxtBJ>NHY-bX!bD&{aCpK8=(4>}S(d-c=)}Fty zWqW^DhS87S^|PllR<%-A%SB!8NnEz-wLy{vW{za_$84 z8x#ufvTW&^wHcX`M;cHzy1CF98>lX47oI(LRs43{5%@xs$4#pjS4;zx6`axTkANe= z7zjF!M9tq7|2RYlb-?sLiC;}*PN`B7j*%3Qdq?!qI8d{CoLZnu(q^+ns(vBJFSX;$ zP8a)Lmf*anZB4Iy<@LFI^DV0)qo|P;&wIz181=B?wEkr(U=San}TrN>LD7 z0}M?q$NwRA=VQw`Y!w7PK(SG>umrHWLEjtF9Drko(6dlj@FLx#1lw{LAM)jH3f9)= z7a036X-sJMzB8b%hFrS5g~Z;k%R5U;TSqTN%VZcpvt;wUCs%abeW)_eDiQzN5BIj$ zn;eaYb7D&M4c&Vj=k7mz+S;h`tL5tm@`~^qbMncwrG|{Dp(UvzLL#nfLj+@ezre9H zo_fzXxZJl@wl&3RInz}sQhLoJYp^urBd<2SmZrIWsZ(?64*4;goi;0qWw6eTey1sz zJh}y1IVzo}dh%U@Yk*C3hKG<~8)G*<`hc|Kxs!6A)EJai|?ZK8!}0~?6|N2HD>sta;nib;N2Xpfxc}Fel-ktU~bU;(mN`v7nqC>Q3k3!X7^s+?k5M$`w^N+TX;*v*mpLjzQvEy_%C`70=dK zU$z}C4dKr=auayItLEM7SE|@qiA;WzZn9g5x=EUu z^41IdJbiz7B`KiMG3C;1?seE$X62XDj2ky`|I1yCfUhKBej z^It`r`ahc9F5HvCPdm%U)9AK<7~A_K@|i`}t_S#%T!482@M3=?*0SzfxBkX#TG$-1 z4J9c9TTY3Oxbl6B`^QDGN<5*#mZK--4L#|Z;j&5|$9S-oNns(c7YI$RL2%?24@li2 zu7f2Ng}6W1?VrK40TER72PkV`Jrw>|QAAv^$#xQZ_X_;!h~jCg=Rmaqb9&OoT|RLu zqVp2_M+-vTDV}Y^f)pY?0728_y9T~SbP`>bT6OTI6KLw}hoPdpdZ-=O+F= zARosT7DtU!kadEU*Z*h(sO#7qr}p(`{>PJ*l!x*;!Sly%;f(xU(X9Y#V3^UUhp=Q8 z`m=x_&~9|H4zOR1oOspa&~7W_VJ1bBpjOX~8Gza*Bb^?4_-vL5fUe~ZFDVb0lL!T6 zyIw(xhR|Ec4Yb<;uv;!JZ-5=`Q_Z{hs}b4aFla5!1dBo^U9B`5X?Vfgw*ywj2-2F% zzHg4->MSk#ThMUszU0&PFE#!Od{tpL4fki4s)kl1anvOo^eE~bYO5c+#Uj!Bk*8IX z$@~k|(>jiQj>m_WxsME!H-vdNgNyO2tyW*KnEu}V*apzK5M!pR7ZiP1a6@DRg* zVt}8BX)K?h^qg}6U+xJz6M+n}KX(->gqM?}H!`vRQmSO9Q4#K=E4!$(jHo1#YYZZ9yFPP)yN%wqd`1QQ8TTi z@qTCfuR!_O@d$|=mEo5WKPR6kcrjqmo5L!m7cdPra6F1Q3hFs>q1Q7fD1M55MBQC> 
zCKhXOa>rOJsLok$^f7@kN#L7}v{rF1L1ht?*B6qT~AHUqAeYZ}4R?j&3m;dB)ZFk~z>BJjD zh5o9yhYJ%OduJ62-Q1$z6GWTISL2h`bTL`i55u(efOQD>BQ1LnfmH(qv`;O3o7mtX z>GU#j^uZdl3?<0`&<*;;7sluim=I{YO>HES{vH5&HH{Lq3E>bJ;pxD>oZ$X%!_`b!MI|j#jVRYhs>a}f9O7M zEU|~@A5CVl`V#(}`NSqnj*HoBu1AT20u|&eI^~Vme%<9Wp6)j(20NYSQWtKHa=+FfzWSX3 z8hS`PedE}RO5E_*?`L^mj`7eY;}aXxUHsS(YyOrM<);IsX)gILz~(l|?B%Mudzq!D zEB?&jyaO~O7C;y5E0_+f>J+pXGnRXBd*)2u=(478Nmvmef%d`p(%*o;60{dPuh(&T zJB&xrY2rH1MXEDi9?Ia)r4;@Szz++OHg-G)x#g_CccGwlFyHQ8XLxu1V(EoK8Y;{g zrs!k>*i%Z6y1X0c<}$(p#ffV#ArnHNn3Rn^1_59SX?|m+tzkN2$(h&}%2IF73Dc4N_etNkcI~bT+TLKZ?hQ64|7qYwrO#UK z^#{f)k0PI?^2ROsjTt*x)aEn5&YZ>9xgXiLUA~7pCMU7udPQnJG^D0!DaSD9hq~&o zkIMp^pPUR-`zMg&Qfj@AxM{3C`nt{g%&T~(K~8kTc;(ZM8h7s1oRzB6f$=Qm{6w*q z<+=EJ^D|-5eL%c z`2qspp37Y|dF{c6*~15z9u_Ft@D?mV`)>9E>O-tNKQjP*q-D+ zKR2(viMHvqYJ8!#lVNQ{vEDB11CD5uCfyeZB(3ARgtZa%?H&rwABv{a$Uw>xlDVe( zsDatp&S@>YV4M7>dHcpw;em@pU?Sn?rz;}R7p9{Ie)CH-E3D7O*6&n<)*NEX%jvzH zbTWoT&9T(?4t#URD{R}uKC$ups#$6BD16qq>}S2a>hJXR3z7j^Z6dpeiN?vpVJybw zS=XBdMfSKCx#R=f{9RZW5YB}&+y=bF9Bc;0gG1ZvFPxK>8G@}KBIUt(4rt5N8yMQ3 zSvMXSS}t#;diw!ltMNWxU@nmDh4hDn7y_!@%|dTt}65dE2MXGO?}Pq@@hszLER+I+~qMceImZTnjkw zOTqhJRT_h{PJRNG*be~an%eN&YhSPl>ief$sB4Rz<{yk4J&jS<{dI*x%4qs91Q{+P z+rQFycj+IIlGZCcf~$_WxWw`MV@W$`laf(7#?4A^r$Ive0~xU%sQ$|s^7>!tK-VvW zb>Q3m9lF^@;yb-?ZFeP2AMx=~N(0GJ=Vnp3_H&nhAEQwYUbg?61;}_t=pgLMc-0oL zwCDaJ$$V3m>*wWx_q=4*sb+YhS*qz?|Ex`Z(QkJkyKd8#o}cxf!0$j{sANXX zKiL1s#Wl=cW)6~?m9xVkU%win$}+)$@~m-*&?mQRvGVB@7!3!4sQf$LnkUPhnXeIp zk4hob!T;ESPlP>+ZH`w{BLDXP!=dGQjyo>N{`=Ndv`&3Qfe^(1QHzbdMnRpi$gIYI zfO_;CpOuiaqRcAETlnHP%hfKqc=R=1viuqMPl3+^0P{7eg5tSBdU9~NwxJ+V$o>;AfNj)w9dbO`x%b3?^$ zJ=h7Wr^zcj(1`72#MDdK$C`Go#|4An2>A@hzLZ9XaHK7E9z;TK`|&z6?-D()gnQ3R zybN|vNxazbhIy9tfI(>fSF=nUumM4d+uZB-d0L4id?w{lqvl$N1QpnK(9{SaNmV^^~{9Yo~|>vjoMKkH~m4Er<~&TLBCHCI^H=;)fqlcr6*JkRrT!VIx5e+?5`)*9gNl%}>1#8D0IX-#{kUYC zU`UcqC&2@mqFCW*roZ>t*1+%cKh6V{SuJLUz)zH7>LQ3+OBxg%So-Q+;$X0LCqw?> z;zx@!)te3Y(pv;Wkt19QX0ajsi=&7qzonEPf2mPe#92Obi)j$270T+}uw{h`Y}sPo 
z{1+}>uR5eP67i(>4#d(e1i24hcz$Cd!Aox@IOYI(|H;>wdsN)D=Fz(mLJqq=l8Iqa z(z-+QBC)U1Kk!(LS9#VsK_L7a)$v(jT%$|+Ge&m)Czdc+ZeFS|* zWexK*zW4f_KU19V77cP~3&}E2|{I@pISuG!aw9l{BZL-i}oYrERs4 zkw#TPC4>f{qnEt0-*C3*<>U{>CdTLzlG%M4Ef+TmVtgo;MjOXVyvLeXD`=9SNbtZB z7UmFeoy<9|5|+Q7^A}`z@299LfA*x)RAC-C-q$xJl-TlIZr4r721&P!uS$Qaupt@o zUnY+bC2^I{;RFzkHm|;|O8^TRGeL-zV9c+CJQe;g{5Q6L#M^AywoHm?p1yZQ{E5k# zxr|(qvR;h1iibd4)-1T%-h*4R`R1(9(D|3)L zQn{Jlc7MCQ?VJWoDL++^;K>D4IX=tdI5NKvJzj3x>RU!n>eN({CDV2@3tKWM)5^aQ z)%uNmvA*7O)m+2v<)Fsd%&O|7x|U`UMsGOZ^cI1oov&{`#%bA!!NT;97~mq>OLm021~ey>smAgM(1aCYVP(@W+BkbScqxX?P1wY4CVTpo~|&ujT3sg6;}K2J>mF zr2L8&hBfuSONUr@xvED;(($AUe;HlXvVi}2SJa0R8dMyLR)bq8#1Ofy;?r$*E80=i z-+A};=X%}eicyz?ciy&>rsl7u8(RLQegQP_vlqHFC2y4rJn@pQXQs#DPO3kTq=Zz} zjMsAWq#WU1%`u%;BgKY{W;fV(7xR=qO;D}F7jD6M zPWXLa+id99N7TknRIOFj%{H(gBk^?F?w>8Je|9Ejsa2y;F_Z}? zu1eI5$(?x@t%ZU_wRc)6bOpMrc#VCzm9+NU_3@PYD$I4Q=mh8RoPbbzV%$k zoLF>8qm8YCqDTeTjy3J}c8{-@JN(UX7Q&F}(teE@_DFiDG}8Fl6qV4eeE(TVyvHuS zG(zBE`6zgWWl)rum8aGY_7W3Au||Mt@%bPgzc7G8l0ViOJ?TPUW8EQ3xNU0q`Dm`fPDll}wb(dg#Gv|99aArx*iGlKhN5 zQL6ZnsKZBf{<*A^QKWj#g4RS$LELIfir+UE)#2zUF zajrt1P=7A97J}dK>i#(nbx$Y;_T7o|6olB|w;;&&A%qyI5VnM6FK0a5_dmvya|8LG z8GW47#ydu}5r6gp&JDfFIxnYDz72`^lN;YHj;>*;+T#nG3}lTu5D{7t$E8YDw0wF7S_eVbXofqOZA}6+v0W2SGx!K_jr5cJU!Nk zk=@&vbx4>#tq_3C$F%$Z1IhnHDPP~)oAfdma@f&Z9aG7NfqN~=kpgM-)d!^hO91yq z;=C%<5dV><6-$h%kLqzCYAb}i!jIv%{1J_k1}5Ynz`;Eu*EcuFl zy+m!SmN`gW8v=;~on5$}`f~c>l!uk&oib!9L75WmGauW>g(M~X+b>AjVw$y8?GXos z9@~BIXH#SH;M`9*??7*j#NbL3+5J&$I7$l%B$8^#%yIWO_du!8UwQ>O1=f4#bFD+) zotY^cyNWY)RU=9Yr#iC#tdz_=WNFQ!-FjTW1iz)WTs;UvGUF;ymvNom5rqxWm}*8e zQwd?`OR{_=7Av+6wVBnYaViais{v6Z-Hhkx)MYEHDK-c1OelM z(nZ5xuHAvWySMYK;`l=(WFsi?=bI4yY@Ofe2&c)K9!ABWND0l<=e!2sy=L^yKlM*r!8Os}g3r9=Hbbe}|VzlMvr-xEW*EeJrdpu6(XsqyZ z%w&MyS1DAYsTPA7QrFM+#|-&s{yN2*w?90R3z0EPYgxgnv_9+Ykq$=Zp=ykMnw0gSz~F$cF4;=_`I0F=jir6PF&jhk>cuP#O&2cyBt zAu6<(u+C^}a-LxKD)+=KS~J#(%_4~{@38F)JzGjTAH#xFIOr>=N@{wJPq(S#o7<>; zg7@aH&wzr~B$SQQ@un(ovwkz2(U2_h&#Hrf2ZnW}e9{zO+O 
z-^*(-XC$J7`a3zt4LjJw|AW-qISEA8aG!6tkFu?o$lGyf!I$r*T2IhL>2*AQS+p~D z6VuI!li_@qbMeQj5EVmRSYhv{2TK7cE&{0k*OQ2bYj(I)T;CZl>qz5G?IzlxMjT-T zAOGT#xvL6@5bEuUq&e=J!>foLzJX$p55pY?=XcX4E=9u?_3N3xLLl9nALBPf``ySA z1A_c8{EUSt8AS93jsyg3VLA z3b;sGky`(?+fiTvlps23)nU9nCNw=(G+*X`*c>ldmgPtb;awY;JSn6#tiOM3XNuN%*K ztuZC|-7E8>e_^M9fvK1_o0+GADkmb&JFQ9moEUQ$yM*lxR1^eNpahQK_Dy&~+7cwpmSAY-YYH+QLrQJrUE?afiQTSI z-2ss=>yoXy$Qht8z|iXut}%QDp|(=02gOSXpPoK_z~9*m(uTSor@%=cqfb=E`(r9C zFbVCTz?4c3fa|q=TzhMF$2WSoL{;6n9!xtJEuq;LbC3)8r+N_Zo4mfBTPmcZWZ41ZBQ&!L={^XRDu=6WVaMp_{*-6}Gagl`Wb{~10C;=81g_L7eFodUN2HVCsQzwqtS z@miyQ9HI2E+M3BlbSbJKlC~<-%GKL;Zba)x8Ab0Nx&^&%?^#*&FMo-}Q(+NJnt63p zw^Jz;whHG?dB=8KFA=m2IA_6MYqxZuz#x!~s6<~#Plvlz`3GaW>Xjr!BN=k6U9y7r`4@Rx1qW|x(~MSa1;`62ely_Hx4Ss7QjhD53cZR?i5RVnvHFuj6G3|NrvK_X(U(rU+hWK29;BS(KON zfrR{*NioC`$t;FkW?Z=A5QF8u@NGv75-pDmCtRAkmF!l?O}Uj=Uwo^|e=lpw&l`vb zv6|C~U5k(3P-;ms?hAFy_tN)7qDhDKWna1RQMx!=tCEyf2+BZxw;O3-gOdh_en!{`^I!KQ}I3;w-U)H{%X@+MhNbQe+jffQHAixe44gMoB)ki4u z_Lr(@?cD6Sr0lIRj`iY91{KX7##AT}tt#`qTX$e|L|~U(-YP4Kmz3L+$VDmNC|iii zqZ^LK&#kuSqvpM1up>^UAa2FAG05|TmIzXX&W+FNSlqC47F*I*!FmKMO$<3q3e}0y z&q~${JF`?!Wzf7j1>ej>sv@a*ReN-RG7Cnqp7#H3UWp#ml#p8`H23?9W(xWt`^IHk zj8tU7yDaKW-v(4^zKC|j>6bplU7WJwXADNq9 z4k*sCNw1Q{&3tPtwNzx9GB3<)SwHZ$$jp~ZXB|dBZMhP2s8D@odL;Rkls6Rucvzr9 zQ1S}Pp<04K#TF?Y_%hjt_O)KS_js8&zS3juj)t`Inm<{s5lRTQFBzO+Wn^Psx&jW5 z&rrkAo_T!CR;>-2yH+(g4~ac~$~$d{%EY831fn6su5<}b%Zzg~OTJ$6`0J9DS9&^* z@YU|7(yk%bR8h;Nhe+H}NBB-?U4~i_I!fl){*M03kb6z@Yt2wHcblNVLwB4?dMK!D z@i&mN|7w@)1`hr!A$QIH`LZoA>$V$PyQY_^qoMVl1Te80CCW}B{*Jy9c5QQK1}ur! zs-I9gx+I|z?#dzM1=2*TQYf8cCUt;3QpC4QPQ6apqA*YV9L9F)bgK2K|H-_b#PXL! 
zm?sR-{ErIt+(4#4^6AX0bx=P&t3Bqy8L&JsQS&iUp<8lwHv)$NQGyR8O=?hOhD)Ehl zv0kMSImvp7-*F&STNYf>#dmj~xcNr!6R1LUr9Efei+IQ0!jDX3@FIW85A`G*cc8Lo z>%J@R*L7*rG?=6q6Y?PmZkLa1TI=TlvfJ>`((JCnMTuL+>0UW5Yy+J#QFM z?8d*JdtPJA2F_wmQJ2(mCe z<^s+iXY@-S#6~_qF;*U$7*h#0f)k-E>mNV?^w)hc*-H%A5OU-$A_-IxjD?N-6@fUz zR1^w1At4YU8O<0}6?*_Q9aU>*pUo=Z%sA(iu=VE)n7dviq#fwFfJKL1iGOo&wpWYp z>@j#D#Jz5p>}^3Ntv%op!CwY9wN?%Or2AqR0?~A^a8^~mg*2x4awp%%y)OF%ygZKVm0ov%*pA#vKF-S$9NI{{?mrFNKi@NPahpZFiH$Y8 zm&my69KPKV&JdWh!;GH;tphC%5t6tC}XBeMN|fIl-KwNLWW6E zUaunHaK+m5n{qJH6-L0sm2^6#fnr8ff38eK7Bg|NEgfUWP7C(gP`c6Gfnpj63f*03^ntsRN| zrOa%9y`vypb>UYsR!?l>{`4>VC`vA`t@o3QQE1xxPtQwFF{r_f#k6a4SetA;jE}fS zr{FvFV+9vkUE}NzM3L(c3RkTXN@k>{L0uSnmV*_yyEup?7l-!}_~QQcdql8spvQjK zA%hubS=MV1zP$Sb;`QerKbH566^-#$5G(FDyO$vo`n|UqEiDR0E?-J?Rey#zBhjGv z>3x}e0t5_TN44#X18jf2 zy`J1G+P)9Ty7k|FEhr-uEO=0R47q|)?PneeS(|!4t|E&-cZ#M(5Fk10I*zE%B%UN5 zR@KUC-_NbmWP~W6&r5KA4QjP#C}u$MNd#G(S@HcXN`rSq*p zp3nMl+Mp_&k6*)%2fQ!+%;Sd+Bx+`vn&eSpNn!;tBsHqGhUV2Xv!%o!)Z5E}=a&TZ z725$X$ek$>_?@rJ@dpC1Yg(aO5S8iPEfv@`Sv`)6nGUL0&;B|cDPHHrPMqc~fELSK zCs4RJO1$pt6&OEgZfQC^_6Z;;qzdtLiBB~AjrPL_JMKT*Ymqtztk*E8v*FU$>MSF1+Crw+tR#?;K#r-fe`{KPy|<%v zSJIW9vKK-5E|t-CQ@BW4tGe+s$+yk4;V_AQ9#9nf!Z_r`CvrACw})FGs)8>UlSV8q z$3zU~`8^I|)DqUZ6)S&+m2%bPc+Vw-^6qS)Icdbdd2Sz<9QKEamlUnp>y9_QeUrx; zdCkZsoWOry8ZY!8JGmNL0K5D$jH19u0whF$SD${pFjIK&DQ)yYc3$&L6SsqM*Nl(6 zA07G#3VT*T3_F(>l0aQ0(e6_CazCYN3o@^+XaI!<`@>svU5r}I4-00RB^9E#ooRY! 
zUdZV46i)DULkeTQs9^Kzv#uuthP;2qI13JPwew|OA4eMr${y68KS-ke;++b@8@Wy( z!WnmPTXBcL;2!6_sv^VT^ii5eLbu*QWc$1^#Q7b>5u+#lo$BAZ#BdC;>JTGwUf&6a{CX@LaBC9FN=;#d)9j1KZ5d56^fE`yJSJL<-{XbsxGS< zDja(6rZO1VK-_7K(*+L$Giy2V<_-^mqE|3v)Vjb0HG7RM&ju)D1q$vHn0CoGBo*}* z^qucZl$~>vA0yPbgC?WMoETyrLY?+^n$I|s2ICiVD{K%#1M1`6oqgzwlAr3Dt#LVy zne{OfC3Nt5f)?9CkJq2K)SYuM+WxZ+MU;Hzqpc=(vE$%r#Qlu?!MKbmbnz8_TQ7?7 zfd}GMkhl%xGDKzg$>4z)p-DA5{=gFEB|p_W8|8LfT7NEj$k)0(S?jfkZgD&=Z8+ya z#LIa`AS)Oz1#F;JgQPba6zmwl^}Cq5pD)?8JvTV~wu7Kq9_7GhZ7{4iBG>Nd2Kj6r zitNEw&eGF~r8@9apn%NyUo*P`PE#ElPVY0Gm79s~?*bOqk|i}h?kzXJ>W7y_ zOdiF^<(!f@Ib#l&h?1E9e^8aQ5=wcC< zEAGe8`qe0&_WPDqK}aC}+G@1F?a3x`^@Z@j>jys zmwvOCib=HW@IcH3rysc_Q$3>+x2c7&?`$D4Ne?<7Y_7pbS+Je|$?XwlUSTyg?KR?& zSTAs(xTUq5Ur>YJ*sR}gbU|&bEvwkJmfQ7*h87J}KSGSD;Pr9Do3VbuifZ6JV17mj zFyN^YTrXRcnHObPU2P^Gssud5A)WgA;!N>z9RHDqin@GR-q-1WDJUNO{x~&KB!?IA z^K8*Xi_XY;%JCVwcn=q7iahL*0c0poX)WaorO+L@@ax3Y^8!K~Fi{rjtFv+6QH>XG z*l1*E4DNNj_&reGJ|E0ObcQ09R-NWqsrTK3E2jWDsx4>UxM!}3x`oJV!r6fOzk)Dh zL}g6F&Nh0r?i`dsRL5-atWQoui!RDMiMIPXyj3?}{MyWX5-_AY=G6sANze#oN4vTm zMTz3MDKi@0ugB4J3ryt4xW^+Dyve1{47~3VWh$d6nYP!^UbW{5GndNX_x8@xIUMuO z`mn&xn7H{`PgQ;gUn}04Opf2+l`)8W@F=ZuEQh#VtFJ<|B>&{TJLMWfM>uQ8tWXb2gV7T zQcR`o+MSW02F4~slos0eYs0>f3q#$xmD_bv!w#DzH*6lkNBW%L@U?8;>-2{zEcHyF zsdz&)NBqeFO>zPA12^gCd?ksB&t&*)Rihwegd&)^WI&re?=QfrJBOlu%aMIdO$r8; zL<8Fzyk3qN(hhz$ux|2=LEMT>CX*VqSYE}Y4nqLR!Ou*;9_E_650Rjnoii9QWqVHJ z%cI-shn{b zkm#vbqgXR~I>%_)@a#}SK`YFz+l}H0P*0&9^*b9 z&+BRArGgEbsNnm$2=~r1{mPWEOrZ&+_0pJ^U}M_Tap`|I<3PRkAJYuTz^u=cW}oi% z3@X>*2m&%_fV=+c<&UwHmKlJb0CBT`V`S{>1?!ky*hQ7_+ES3 z%kw0aJo3qO6SnBKx^|_=_xQoX^?G1A4D)I1tDNwxn$~<`d}wIXpH{T|^x-AsvGY=L zzzFAu!Mel<~^z(LDp`|;jl^*DqJmP^L&>7?u^6=X2R#6{5`-UKy`LfS#lPq9v ziPykkvmh@Gn?XGx1=nI`FU#gzHGwj%_+%FdWQ-ltzYt+56aSHU9rzUb{DTXwBQ~S) z_tym-Xk*Y?Q+Uy)Q;^$hfS1ZU=LqEcFy#%NQ|heciKAl^&yY?YQKU_g$~te!1(Oww z$W#33{o110>xu{sl*$laS2nZ%M9M!LJ@4>nCm)~m)?CY4#f4T{Ah6)`)_nz&@j4V6 zd9J|XW{?l3XKN)&NaN zb3MkO-MNC)K=N}9=Sx4W=4Hj5KrOG}2L|iR-$(sy0UQAPkou$;4JF(3!Cb@yQgAv( 
zky$oBuJK^N@l6x-7SSXpFqU;O)r^PC^}(kuiac|bU9Q6rVWP1t;P7tBf?ghVPWmx; zQ6BO%EUCb$ae?0PE!(oS8W}6KR?3!ySh`NO#ztfve5m?P4GL>U0{f>^ue_WCH*6C2 zK4o)##eOKi+;o24Nyc7cv-f@{9Qt&r9sWe2Q__%*K zKM(o!5#arIVJc+2KWPV^2x{#DnE;b#opsWM|vwx-$Js}$t zFaPx{uGEa&U<^B7q1WF;7c!ELVh#5l9~R00lMVtwr;$nc7MRp{hvA0)BvCgXh^TZ+ zh#`-2&?XuINg%vr&U&iD*2&He_$UeeGw58C%G~)iVLjC6%UpB^+BqOj_G1mr`vwvV z=o;o`NMWW1UVz6gqlzbQXe`-EAtmCmN1#iBmp@qL_xaA5fD>`2Fa`9 zyHebN8s=7{I?JAA{jnt$Tr`XoK$s$p{ee9L)_3%ssGZbgc6$Fw)}iQ;YxFF{4tJpm z0gP8(il$&27{}YXSbpfpy^r@MfQu65G~L+1Y+YZxgd&1k|Hs@(tFBL;U_w2hAv{mr z@P=WU4tS@rKZEG_t|Qyj@g>|?3wkQI%Bud`HJv$}q37lJ!JJ~gG(w3MsLPNSrsPT+ z`vC5XCi5YV!rXk3;KrAj;}}esBL32GJK?}lc)1d=m(%^qcOpMIo~VBi9Lq@qdg{q4 zsqlF+6p#lP>b~WN@|tRF;Z_tKWoVbTk;Ow!{(!5PLO#)twC_C9iZs9s+Sj&<{w2vD zao>McXd_X%azN~&2C)>__H}QC}x4J}j z6f{muBJcjhVRl){B559?H8-}Oa2*7L<045FdHe>Q1Zx&UTy*((6P5+?!!RPnou5bx zY7?U?x8)Vt+|kB^4loJ|x{};~xKy%?f$zkba|OvDs9!Xz;Mqw>3P_Q?kMV_fk;ma^ zOD@Z6cTUf>%;M@#j0|$JX?`WHgRDys2gGTBOJ45_YYd$J5dB)z19ItJNcz(LC_?@W zQnn^1w7`uRHzWT3tISK5HANjR5CyB92}a9#L~Ke{MQ_Irnd zZz`^3m}JiO^EDL;u4pu4xxbe69sjx3=6z!{#Z^q!A;URNu@~CwnWUY5sAJ;cW%WU7 za4PBWBFr;zAxk}uy<}MGIX&o?D<|b(+zDP^0xW)#(lUX^*#vo9jVW+lR0KD=DbA79 z$>79hi5)!?h+91gB>%KKJIxOtEtBS(?mYN$GXL0MioZW`bq?n+O6M^j!(C#KB8|Q=$veKb6lW)nRxTp4~bJ zkx18?vV8r%PrH}Vfaw&vou^Y{omnc#`F0*vx2?m=Hn#}h_v9%0h$UpaCa8dGQtifi z8XJKhb6)VUz)MKtp$6$8%SAky@g07Ti){T=9js&mNX8?e1}dYenjw zXSTZOqMWY740I1uN(QZq)+o3E*c{z`auPiTM=JDkx*Lxk;S0F20M}oaJyWwtD&bnY zNo*bz(vXbb0xe1DbvoINtGNqO%Yyr9s3n!GD5z*0@;EnfSix(O(j}zEWFMhzi8RvW z#(C=_K_tGm0X_T1LwO{_2`=(KkXv@*zdAAZ9;5~f5xYN>R`Dd3d;GBd1<}(9`NGv) zCll1Y>+xldyWQJdKMJU*h#4C-&ui~S7>-Q*lKAihAX$+BQ4-Fv3#Fa#yxXd!-=k8P z#q-ssDX|%$<1PB|_mNkbG^^ItzCaT1C|ot5w~$ym!h%KrHn5a7atm4g!YEZXd?w(a z2FVAj*U#_0*D*l=zkFlrDmF7iCiipXO@-Y26{ z$PjJ(4U~dl(WN`o#BI3vlmPJV9>8~8U?ODi&QF`0-EQ>r^x;#OA{puh=A{^|Zb||# zQpQ4G9e5H6;AKMOn8_Iw<-3Rs(ZgGz$}bRJ6i0yh8nT)ITBmsjd6dIt(80ZbWM$FA ztcd1sc3I(ncm8tGL!1}{CM{v-v-YIsh|J^JuRAsNE)V%ru|N}}AA4ZBBS?LV=aE1& zf~s^w2IfCMWQ(RxKn5Vt;a-P5A=7r?(ykJ`6d!^C-hP{>G 
zVNT%F`7=%1Thj8xTW$i}>t(}Iq4e&cgcfc5t21mkLDbf3(1^=ixz?6RiZsv{zn|=W zA`B#K-P&lS7r$HF8#h zkIRYfpwtl_(Yc${+%;fMe`w9~ck42w(9rDw%3Ycx zJ(>&L>ksb(=f5%)c=JdRW;t&zg8FhKJ~Net1)~uU1}(2>nba{!FW0B}qjf8KmX_j7 z*^&>8UpPJfe--02M<HKcb{x|P3OdWGjFdg(`=2spMAcL?lt$jE0 zY8U&W;&&fjjE<84=2P)^Y0^>2;N`l0!Z8oA3y`Jeq_hcka^;*$o?zozyAJsB<~bef zmK;SOD|QeRd5KuikR++oCQ9ys1_VBO0%Bwz6jEH~nC)HDQA7F_0Vw;MXTZ7shVlVxt5(|;2p(0PoV>Wkj5k9EL{ z;ekBWF#;K)V~*h!CjGKy$?~u=tWOw?_xqj5cjeGXUtMMtlV4W&vdd0L(BltV3^!EH zWisay@#9{(PePM@&&|%AZyQ=&^%gRNvifmC5Mr&_5(IsQ3>DhD?pWb_$#9)L0l`J3 z#~Oy=MJc$$ceVBt(W8LE#Spll+{6eVWQRW>t5d{tP(5|wIK&fB5eQ;1Bw0EzoW;Fn zZ@mOI2fPM#oiq!QYuS63rcWEy3ML(lHvC}+fCYx0Umn_8OpVcEP2V#Ya_++^X6*+o z|Cy^pWj1eZr7oH^s0@kUHRrC=Ft40x6G^OfT5vrKnRQzb;0=#Y09)#E$pGxJl8V=! z8P7qorl0&4#w~X$H8No2z5~x`h!Us8Rqp1NGG^HyGVb4>@ld|+_-KoE=*aI-F9>To zto=~f-e$$Cnja5Fnq)Hnun6p1Fps~=uFr~K-w%?S_(d!IVBuB&*L=o9_xO2pJyd{+ z^gB?Gg18gr(X02kg4)Uo8)!ZMa*ZWjwaviF;`cT!+XQB3$M&WAKBZXu&Ug8m&5W6k zwkdLSdXcN~3A-EU#fhO5PNrgF&?(88Ak>-=eqrtB92U=1&xz|S;+ z*P)*!yoj)Hfol;CyMd!%6FsUCpn><~KFrFu?!9}9IDlgJ+1)trfqVd02pJ$dHYxTdmD1`Z0q@=otv6D^&DZqu;B=JBfJ6`5`~uU+ zZCHwo0&E6rXV7d-Bb zK;>OjQd{+(n%#d|tKKvkkRJ{^E=A8K*p=PmvYeGY7^7r?t+R^xTx`h16q$=adOz2>xF&)mE-(Ra}BLh?XNCJSt+pm-7Vve$epiB&B867r@oG{x$Ad{wYMdAG|{te2|91FLKL=`(l35T+yER(xMZnwU~SQktM^n@KpdEJ}QH`_iuaGc%3moq<$UA zo$-wnpQ94XPMQ%RD#4_~Jd|&(3KBmLUT(i8m{1F#mtp67wAMZK;hgIltQP?Rmc!xx zlu|%gU!_RGWQD*MEt|urn*v#49^Zq?sN#Ldsaz072O0d;#ZBNK8p1GI=BuPI%t1)w zfe=Z7K}tk^I_L`_WRYajd@w;4Q`R}0ThX3jB+z0C9ZxC@Z5dfD)lF`~#8+7B(x@7^{)gpB@2h4T4E-_0)XYk}5 z^fpdk-g|vzLacmOLjF)EMx$qM-qe*ZF?dF7ckEQ4cGvzNa{~+Ra<;3)Qd0lNIYv_! 
zjYw5;ox4I(U?#&KbbN~H_n+F@2hC_?qull6@7HnZ65fqyHkAer#>P98E?>zmwB*>p z1rAjfpeUgQ$ayM;Y9>>+;spW<5VPkId#&ia=^zQjS$q$nfD5aN_q^ATof|Dy0|j5^ zbVpZX{|&lBLcVs5mK;>a2^f>_OW&FoNSxPb$jI01-=2uTP(L&+f0QrW-*ucgriOe- z*2`sFt9u$fwon&$Zungiqb*rIxmYHNwYl}@c0-cJzLA2ozg??0it5tCrx=FsyxcxO zfabcgZctvA>2t^*^ayV#GrIp&%Xz)ZO8@P4k0gswX}d)oAAT3(b!93k#DRk`@fDqt z%l{#d$vBiRSkJdsVIr24c~nxvk(?6$?BdWCtj)tD%7QdtB%<)HxGUq;fI`PH4cSowuSysP0WSRWNiEtZ$3pMn-5ILFH@sXKB19up zJz^J$IglhNFg4PE7CFvA)=Oe-rRx}8>J~bk!cyrR^A`?QYC&8HZGM}?Lnc5=@M zB0Ap)@oBoB)BUP+7M!hYppP2w#S|v$>-o&kY`%SO1(QEne^DEIIj(wbR?_TA+e)AL z@anxyGveFj9;`;W7ZdHuW*x7{JAcTaYE4kzA70j0Wf!;YL@r#tYTnx>c4<17EU59G z2x^80eoK&3V*;Q6arULJ2BxFaKuFwquybF?KN1bxmcoe3FcV-|+bLe^xW@Xm9A0k$ zXpB-P)&dU)DRNm_D}aV-y0U8e5Q&;$0?+pOq@PS4hBiy&6KMpmaS%_Fjwq0A83P=k zmFc;jOd3S2DYDnp6lvb&Z&lZah0N;Oqp#PW$>ISI%MnGj5IU}+h%U8>X_jwiUNLBy z%d)R%0mo4BT8aE$m2OysyF1u|hQA-f-Ikb#3)aqmRdIMkssxj+b?={ILvAN%2OHcP zh+4^#N;=YNg^A*MM-A;`r*5W{%fM&&MCkN9PeABp@~9Wc3#=7>CP(hl@Gf`8=bkIc zw8-m8h(@&By#|g#DxBK@C?IO!84ZvGC$l{dE2e|)UFdN&Jkbyhqe1c5*8>{krwkMw za`!7h5JIHj1au8ZAKPk_!X;TsCCesVd?1(m0?E^RH*H$}ZwlB4Nc**u8Tj|+GySiU zUChCQeCYe@B0d=claZ|UJM3Uuz7tw*?!PegtXS>dbJ-*#bVs-LA{`tt?o{gbC&|WI zPl|oDUxz&iad7)0178W2CwCUMd}}O#$-&ADWJTYX=Q}<3_wUP$HkVvbnMMfR(^_7 z)E~fE2|CXL&`mb-Xl?vK%zSl#KRQbbSst6>b_dPgVOJeIxcXtQ7Cm(jJcsi@s>UB+ z^Wob2cn4Hs^MNqw$OmbCRrWksJ0JB)BIsLkG6i2yeaX+T?E?z}&hcfKJT$!m0wKQv zHU?EHysmh(H`)+j4NoSXxgV=D;h?Ukr*~QTG=;KF{_E$3;kmh8%wt?6P21_+zk^)j z9WIbv%SU=o$Ij;R373i;Xd`vOOJ99LLU1;E$#tzhSr!d572bDby4wxbt%{e8eF%#l zk)FGf+r2aW{*zM)VgA{AZ>qC5^K-$C#YQn{PiHfvt}&ED*;g^{bEb#4*O}jU4~=yf zp{ZUz(3V}lVgE$|W!q0MBF@|wi9yz9kCm3YaBbggxHNND)L2ns!?1mbZia&ILLzy} zgF0N4BtUeYfj-$3fEA=nL?O@0CZ&|25Y=c*i^aEvRc21!{k$dNUvx?yiLm`@DtCo9{pLiEH$yZDGkGf)kWcj$r?Gp$8~Y)p!o3Y)O;mwTCBshq^7nadDa zd=&a3Y_c2IFID3iKuwOcA|ouB^F)0Ak|1CIq%;_QcHdaQyLC2+=!n&U>f2chL)>~0 zpAr6HLT%G^bFEzG+f|rd&tRWe8u3;P)FQ?at}!cKpRp=~F;}C<+TQbHY>3q}f%0~G 
zbO$3HeMP|)FA*qo2u#fFN}@1$%tF-oIQ`xV@6ItG(*V{`-UL0AIVv<@=sOYx;>|Vq?xP+bHkObL~nKWui4;A%AE@;y}ncZV{er}1dr3u!K1me(jD(7EEA{87ZXfB`t) z&UzEhC7+U#fQi(?Fqtfm%jo!9=XbTE#%2QgTLQ8?X=MpLLkVehbGJT}^=`f8@~-Ey z_k@Wq6kwgLE!HN^Hru-!audgIH{>=-%-?Untfl`%k;x8(P1_~AYw?KZNa{z!+!@aB zwoN)^|KthXy1B#hd?d}IyCPk8zC$V8K8B;%Nzmuhf;)6eH%wVFtu*_LEGJqFFggVJmz1>{hhWUh)Kh9sL+8VKr{c zpX7DFo}u{Mw}$fg9ldl@TBCVPj zYKQHcKvsV3`vFabTeVGWApexKO0xS&xn~{{|C;V}rJw(-JZffoIemau!p9Lu&p@HH zC&6tg<+fbw*lNiJWx9Z9;v@2lGh)3RjC8Zg3zq|(Mz6n6khwdKT=`fvsa(#zWa_BX zxi0V@j4ov>@|284En>n(1wT^OUqnsZ$=sS^&Z=(B<*m>?V%n2j?6)4E6ht&XT!%26 zxekaVB!tPXR5O5l=+CBc3d_*v`hghq@kVrLFJ}-hnR{{0)39u5GV%2$xTqX?3)eXw3l^_7jH6ed8zdk0VeY&M#7jQp)yw*m=msoq39(^#IxR%U&iw?$T#p zp@l0i>ZoFajEvNA&9gDP=WhI6|73c&{CacQyTxwMjIQLhc1r8)a(b-8tmfAmpXwNN z^6wQX_s7rr?T1^p(J?ggRr;3cUL45r!{CDBta9B~tKB;%gA74exrr znPB?~-sgWe#GtQWIYZmL%FznzXMe)OkxEya!8ZxhMT&amh(q|EGyiywdayxK5)(&d zI>3c8#3|gOQ-tES{MYGfJ3B!bowrLtgSi2B`0PeBxgfTj9QTVxcx>5i)oMQJ5LZMfh7f@lAA6xoA~;AezJF9T4I|qH`d;LIQ>-^kFsnK=pq|L z9?B-R-*ifkOSiybL0*6CxkGT&$IHN_egN5X%CGuY6AF&Ch?UE<)|0pzK+PeoC{<&D zIAQp zRV_965liNRPr);19Q~=$Ef0-uYbEYJGsEwiXLxtUI=g@Crh)8Y+&)Q63#dj?d+kz+ zWBTMM2_3lLTf}(~%!DG*E_FY32#Ue(t`gp^MJ_gtdxnDG^rNT6#1Gltc|R(yErUXd zI#M)REA8JA7|!s&`b7xe*uTmYHMW3B%;tPlW&0#(6i1g-D?gwuEoeEa7~kw%BW5|V{PrMM zqGX5gaJRw)F^Rx8^dbxGN3`_mPI*bJLPKqD*Ep|(1lMB@S%ij8TROudv7Cbm3S2~Q z(CCd+@tYvj-wAI43}DK&P%_`fmczNmMYLZBfLrTGO+O2H3T4Xe{I%;eM#*Z1hqya) zS%c&cLfk6}7EpkG*|%!(FRSDJRkS}-KK%lmm9WU9-I)^Y3U{CWcZVq+B2>Y&rhy)cGOm1Gt&KY=hDYEA?Auc9qCI>ZOlKet)F4& zGAH|ZdKW~y6kv;iNDdRERzMC2+M%xauwym6*E-YBaY_3#d(sH$5;8FE=0!>xL( zzx8l{E26g8*6A8ee*)q)uE1tpgKfu9^*pkLBmLF0G077Fu!`R#B`@_Z=7v4-D2VLn zb#D2uIIG9-Ju7Wia_mFMu@4!4kO=H-{khm@XEnH1W|F!E>*Vj4U9ZUOee`v zIOSOiE?plp+}?G~_1&74_ILJ_oUQ|KsTO%Q!hPd+W*WCVob4!HjZp1bU*c#W=Fp}g zx;P`Xt{8(l@~|}2{g`$RPmR~1@)G?C4gL7-r7w^tf*my{{{t;4pf#low69&Unm}&s zXAD(SKr*@jEMO%=(3zK9)ryc1#A_tvQ97$#`a1C8$At?t173mC{u7aM*Z?+b)Cd*) z4f_0N<{}iraTPGnaa=2aKVXFnM$}mfK-7wjaUI=~Mz0QoO2)65b_>y zCn5!rj^S*BjC3VNThR&fvaZFsRw 
z@>oQ?m+KIVq()gV+g;m=h73y8psZ}*j%gFD>&#%K*`LY13R20)B(ie&T+DI^bm)r` zK7n;LNz4@O^{KQe!faCOQHg8+(MxB$ZfVE{$u8p)jG&S$!Ly2-Z@#3ywyUEVV6*58 z^!L*MDKS0$o}Js-%lp{MU|;D0EaWk~E8hTTcG*CCqS|0oea%eQZ}#F`HKHT5er1zf zuy@D>`^Yvb5EhUN3RulOJon7ycB&RVyev4x1)^wI21w@&+bBDfd21u5Y?tGmx#6r7 zf*|oK!2M63BOwr*sZY$i30wB-bX*&1pl*SqO-l*V!WWQn$~9O)Aka}F6*Xo1aSE#^ z$BUqR@b?s?y+`^WjWoqk=`PkQj2d=wP!rd<&Q>-02BH+!%nQI6!LgJD z(F0mllkD>2R_Z4=UOh3ppwec3$9QV2?$ql#gxSD)+85vW+MqGcnMRIL)Iqmvoq%^f4 zA$J+XF&20>a;fHXjw8n@rF&!7#pVwBb;#cdfZ?7zHH_MTkWX4_j}5?*4GY*hI9jQF zNAMrr)$r_RtvtlD;s-fM0JI^4{Nxi3jpxPvMC{0*%&Lo#8P%hT63^5nhYF~LlrKXV zX3OMtf|}Zh8DFbKv1#^@RF zcn54rO)8XwC=haZ?5Zj9RAawJaEZFC-!$z9W+G7PMW#gj))^W83mLh~Aa_r?g{5#8 zGnbAZ60~2`*2DF)-)^*+;6(t7NZZ6x4b};R4(_q#{poz~DRJHWyIyimup2VL)rsSl~nfW;{Q01FTkwch3GA1aj?}Z~MogG4;eo zpX{FP8m%zuKx@v|iREYf4{Z!$Qp%R*y6$n*^2FCF*r(Fr-C|voL8QAV7q1*2H3Y=6 zez8ZTDvP=QDYz_!8-Jha*1W_D;_ua(A=*3 zt_2r$G9~Vg@2bHnrv4~~JO!~cD+lMd-+%q$fu}=Pb2>67OVa2o@*p);v!-{g5+s{l zqTu?W70OxZ;6fmE_HE-pG8#eZ#@1qxr@Z1|&Eq)P;|M`MsCY>?vo#m71aP*_BhfBS z+^YJ_jU{%`E){O*cf#}ONAB>NnakttxL`KBzj0{J7K>QS;wNTncle`oS0NGWhOi$q zmz^dWaH}CL04n}I0~-Lcu+`=$SitL)L(IPz3qOf5DyBxC@|FBIHQ~>} zg|Uc00dH32{iV0WEEAIi_;tt^8(DjEi@4F#&hFepsSy%pJ+;DarlwWpH+86;gT{ZY zKQNLKbQXw6hIyyr)8T{Z`4XcSSf!oSX(oGK_(~NeNqSm35cLdpRo5dV*v%G^SKH5Q zrYR?tmMs@xbq>cW{T2FYwdP!Dx>u~fyw9jXn2AIh|8gADp><5fukd!cTE~AhsSym! 
zK^o+*mK&QKmG|y-4n1K)bRkqD!-mp3*zcJ>>u=f5D}OZ65lG1=M)&M#+ery-C%i&J zyf)8?V|wH$#~H*A%%e7L!3K6bl!bY?00#4K9+ZPLnS(&5Zh9p6bVW^0E1(!X;2#Eo z)GzJLP-uLgpP^{C7O%CdG`6EzXA9+ z>_Ig>@|g2G#B*S)6J*-fdbT<#hWJ;ss^`w8)hXET8@x|65-W?s7i-pUTz7Vu{uL5g z8|xlu8f)A|Ydvr#WKYke2W=_nwEL!3oztl;KouiR8F@i#`6@_uACA6{@i)gYU+0i| zkRGUc$x?fQau%!G^3Y2ie|hawM09qgmu=}$i_egFlBJ^jM_1av6W<-M)>+Al;`R2b z$C~R*rwLQbZ?~0QG@IBJGaD6_wWxZShDd}fHQlgg(H@Cx`2{}^pm4HK5}L(qmxZnY z^*E9rfQx8I{YL@r3?4v3Pq`zlqBg)~7Be^a+L-Ms(67l*1$awy(C5@m=J>V{;N7lfZ~BYe+*`vQ*=ilP8eUt?YM3F{gXddBeFqlpM_jh+O30KG`-zDGEpy>c^sPe{a-qm79t_ z8mM0VX}pL#li>38p|qJ*&(_PC{BjlvtaojW*22iPv>ol?CLL{%-!NiL&tR4~u$9Kn z)ac&7AF&nRs-5&MSvYfc@Pjw&zK68|?fUIndyvGvc}P`RjG3NDmMUc>i=4%v>!{)O zi@Jw-tp&l?3NIZsK502BKK&z}JAg|~#@>KGA18a4ThX^ZSAIwc5oE9G9RriR0GV{c zleuoA>=?*D(;LKDP^;X3Nx9!iIXmCbE(fvvL54^!;Yf%S@fPSPOIIr7DYBRPcj=UR z%cX0wK>M(G0)Qe)c1jMkQ!`=W9VVbGLLknS4=5p_*W{66@EKYS$DZFa*R}TjO1wRY zeIN~mx6JK%h|(k4S?ss5uC+Oli(}jPJzA_|G_g4+KNz3neA-pteDnf-AW%HWW_YXG z-zO-z-SFzgw2XyCkbYSE80WwOi%H7tpV1j*OV%dFJ|6>S8>U}u)ko|HV{D%JhKybM zMse@#iUDrJ6ug&eQTs!Sb(n7YwIuJ*=eevq06A4!gU3gO8SSnmZb<;C?z<4Yz8@4P zb9ntDyc{{xhHRsP$h@EVNB;?z_Xu`|D{e=(^H}m1J-ROO->5mcz4|$cc6Dn^j$x>T z0^^YY2r6(AO#lUgS1|wh3dD{2FD@`$v6|y;I`X!BdBN=g{wQ;@7m1aaSWXrQ)84KA zQK3gFaWgGP64T#gDjJWqtqu+*bm$xwiVU$v=T3PTy-&O#y}i51+FTSf;OrZ5l0C^t zHLpPF@hVE26Qg3I7SywOU8|esHci#1f?Q7D+dueNK85W3^tEm+*NZw8=&QYdL$%N$XtME z|9|076%2Sm_GxfiY`^A&xOs*F4L(dgN-IJZQ-LBjD-V?*otEY|+4k2T4}ml9t8VM* z`@)Cy9Ns#LdjqG8q>Ch7>Bvvu{+oQD;KP0j;)V%YVZ;JKJX6x>;m9Vx;F80L<}8$^ z%avNr7^0uZJ<+zXmr7hEB0LMivX7J8$XFeM_5r_^*9MPx+6E}02`M_70cij;|JRf0 z)BqHW*pgl>Ri&yHmg*Zk4gkwCkHEJ4&VtJsvNLd&j7I?-*$S2#}@YB;rXfIsYN%_Y2Q9Q17xFz`F|>V zRw4NzDd3&JbB+dvvn#I4T}QtUpDjpy$avSrNj0tj>YBDX*2ich@W#&vjIXnFfNtwU z`rE@ka0yYS@i_KMWDI)dGg@#w{KM#LHp(HmU(q)gEuql+1i-%63&?(LlWsj zQRAI2tlFB3RabJ)Jj&|&s8y2CDhC7(x}8!b*#AB$c_N?@0OBM_I^thaQAW5yNgsx zU(*3GObi;wec8*gdOtx8Wa(iRKpENb;D!=FpU7(~{kOJuMu9W=T=FF&GVeG*oo+03 zN;@nrC>mQI98_1=-t~eGM0lajYO>2BnyaU 
z^j7pL>rk3*mxBP~>=G_BR?}B~FC8Czm_7xaK|MLiK(_{FyBHudAhXwIJmK*(mz7Fz zZcXSI@?T_Lw)+|5qp^m7dA9&cBjWz7U&*oY0X_QOrQe^o4yh&e7;9yu(p;#Prr zZX*{2RM(zWHoIw1zuL2cl<@)}I00U&wj+ZFQ!-14F1de9M#YPRKk3b@s~z9H&mb(P!($aB@OnC@$My6mhi(j6 zETje#I(BOWSs!=Yl>XzavHz{G$m%=PF{YT>N`lI%WKIxVu3x+#Ci!qP=vd-VB(7`#z#FSNd;3=yO0t2O6Q_8sIE*&N6Zfj~rlND?7qoCz_6hIOg{G;3G#Xs9jl!ZHfH2j{=L(tx{lN-euZFJ! zcHx`dZAi!v3OveS^bfsDe|rXIt3p2k83p?Qf7K$xTi3{37KbV}29JoyyENy0p=_w$ z^9?aDc%D^~RPyFaNZVwa@poDu$octLvejcW58Xi?XX$kHTrykpYy#0A^ zP$@!2b)vV$6Pp`E?_NWTcjaB^XL3*^UnCJq>g_7Tk`|@>G_R=O_w@4Ib|v813i-fF?Nm zZ;TH0sDIA|4n-=n!PoH*D=eXPd3!#mhy+fRLunmc7U91NN{9CjH+rapsH?xHTkqnC z_TA@4N_p?fkK6`n44exHM=1&bd_e=*bwtklDGFJxh@(@+J$rHT) zQp@8b3laPz_ewv90W|%g=s_N5zT|UNqbWOC`)1$DzmfUP6S}5e$(XtuLt$zJGi@&IiCr>AXX!te# zlBLVx(N|w=wZ{MOqEmn{z~b1wz|)!s@&K~9pBxwzCm6qV1NbS>0RxRWQ&WWMUR_;CRe!80r>$uF+D77JnY`c~+*FvlP6_5)a9xP}uNN$# zX{~}wFM0~cO2a3=uHUY_McfDNc!Z|Qai4HYk6n4w94czsuF_VZQy^=578AX8dolF> zznrqj0P1_aIXBbe>}ha&OD*!$WW*vr`biGJC_V`(k|37|t!Ucr$|VI1EC5B0zM^i8 zfMnz;^WQq+X@I}c&mFG*zQf;Tx*ZK^j)IxDgPXvB=&INc1BKlwPt91-8ov*u*HY=B zq=U3`5zW232j?5-DPTIr=!08-(>H%lPyo1v`Ed|r^De0u@^{U;V#&RvSoW*8fcu~s zqY=sMjmmMk+9-UI=6lU1V^l~QAiGDC5ahALoCC6}4It|$wP!fEa zQmVe4|KI>G`7kO{{^p*XXcsdOAe(HXR8W~~gy)ZhcIeu7#B0o8V*&pQJN*KG5O&r; z%A{|8h_?jj*|r?r4Z4@*LLYo5=wBmdduj}3LX_An$v#ISv4|gTso>Tu=iB`o&A^KC zsQ7)jd2u;tztOgd`V0H0ygV%3LJf;ie6f1Ln3 z&#zg){R+NWul2dsq(oe;`n$ZM3Ael?A-B`uDsWZuSI)CMDi3%KPnwMR7NaY5wxVU6?$( zsU~e$#vXZM0P;UjXLCXH1i56wFs4b*svxpm&whoyEtwLjU9RHHS1aH6LaFSES6K)!sA#KXg#BGq{kkIKTzqOkVM{%tyP>P@5N>HQ? 
zP$)8%prO&e004V|ThJR>Z59tw(GD}0pNMi0Cjg-t)Ul1iXvhKsR|wF(0#F2P#{#K2 zQsP^vLWLirgvDx_|7luf|B%y5HRHfcslU%_xykq<;4?i#!mPvr81i2}pM`aAIF@#6 zu82&Whng;a5DT?k9(a){$3iwd7R5@a?9T22+^g#?pj04RIbsL5X#3wR07c$u0{-s% z(@^R7r9U_9Jr#nSs*|nxnpFW{0-q*3{s&DU(jdQW;58&W91j2T>zCTl`7|h2u6M5i;I1&!Uc>(8%8YDtcda&IfjeVya+{>_~m` zgFu=)uGBs@llubRL_qdlKS!n<-p31piI6mQSPbByptcxuS$+ehqpH3Bk;i*P67tkE zaHJp*0!No=0s`0`<~xj_h3hEHgJ$ekPBFRi0bW1)uMzv6G~&-+-TG$u9E3uiJkwF# zAAB1w>5(Qi;GA+!kE!O%R_jB9hBfg(Lry2Ym1+@IzX# zdza*;_&>Qlh>+Q&362W+zHjf@kc922Q4XGDlF$hAl7U~RHr}UwkVG8wa!B!wUGecBJx0|9hdHEtU;dAyGIL40v#B9#{Vrw z2E48GxK(??AJlU3@!^U@f3&jrh_AMtqq*;$_0$$SJMQb+LZ!$2 z^27$j+D6G~$O5a@-Mb{^5WvwVzc^vY0QGElx)@P9KlNe}%+a!Eq&J$Ytb!;EU@TXA_qf6G&eZDD)rz zW@<$igxt#%2b97*;QquoGTV^ts^PBMstCks?t@w|xj@$N{EefYegetyhOeDpQXR)xIfolCtj!`_K$G`h?dk@Cb%@uaev0$I zM}JrQzwdfW#tKdt@^9`=ir)h0rTr@{YSbUZ89NxT_#P=r;*9ob?gM&6Z44NdL0fO4Jl7jridWl@bL_%;CN!RL?GNlANB>t{A_@jWpE zB#-Fu@Y;xAXQ94~6ag$}xFZV7#Q!W{aBHZQ6m)^bNt1fiuLYu~@Dfz&QKN07Yu=e> z>|zg>%rC zEUGR1Vg8W`5QLt7HyQ6Kg26B^ua`hPo};8yV)eXT!+(PGAR*!6!yxX`9mMNQG$(}0 z`khY^gYsx~UXv_8Zs|p5(6WA$NWimtXnLnUQ)@G7*pl$+vq8>2q$@dXgavW&E;_Aj zNl2h1z@9MgFaGM1f}E&T;8+UhP|NiR_gql#^5YX5w>4w~r1~lXm_D8+w<9?Eu#Iw39gxy$Q78l9E0*&|ln8~_VS8iP&Ro0s3A~sG)V3h;*WHz7Gi{zG+fkO5ew@aQ8 zaQ8+dA(IjFMv&c?)pDRmUVO z`vj($-hq)|x!6xtGZaea{-&k${VU{txBK-07UBy9*Io)IRS)Uo_MNTsb!p8b9)5WG zdq3i4+M1@b=Z;w|T9{~=moI5~^rccdp00cYb1GZTkSjwHx;+V$h-Y*x7ik_MMVPY) zv-6{InCw+ip@Glnu!ff3vepN~0+XVBL2g`Kz^K-ytY`l>oeM5nvJ!0LuK*Xc#mrm| zl@>pEjSd9FSL$xeV{ft*UQR-7(UNB%TtLyAP{D7ILr*M*Y+a0|PFx2`b&c>18p;(N zC29=hHM;tHc=dn9{BEtVqS19os5^3^vTFXcdzGKr`r?OGMXwLKbDk(mOEu8TcbusP|=m&f28#v&L@Idtvz<> z==9TdEu{h7`Le&8_N&C5tzMoo>em3BD!_nT14U12pp-hfc(ekZ!0_1w9FCMaZ~~Lb z?JFQ3=+%H?ZBP`Es)pqtWoUg5c#SvDake{maiJd5<+`m!3;8 zbg1EYSlHMfdCEaTCK!JOeb^;Q?Wh6DkZDo2b?o<_pF7vFg*s;2L&DHaK(p~)lB~Xg`+bEWm;%uB^^KwIsGv_V}3-03H3e465zo{hj?7@cQ6cH z)mRZo3kygDzx#XTsHYdflmOIeq5^QIbb_F;rYVzV3n94=RRBR70``}|e4@_ENT6`Q?B-Xb)LQN^i}ptsyk$*7_?W?o1> 
zoVPgbKJAbqWU!_qY9GaD0o}2VZoXQbwy#gc@8b$eQ6P-5J#q?ucTx))d_K=UjzN_d zG%COv8~^b?2-CR3(@In8X(K{QjJR-To+V1rLPaM)r^b{6YNv5tM7VV9?7#pG`KvyV z?q&)T+!44wT!4B3DUOvFYp!MgY!AY79*;$g^&s1>=e0RS3DyQ3&-?btU=x);l}eo( zxcFE^&p6YkN3>wteBC~3)zoMQ@~}TEm0S?+H#t08^26Ii2~0$mvU2O@s=s!~oo;*9 zAagr(L^m|j^!{s2{65-BBJdsN_6ysaiZui_kxL_Z)tEyQZcN?PwY!iqSp<%4+$c)- zw$6(PH@*T=uKh@K0W7Tu@2#hhn!tBc+v|hwDJZ>8Kq1&NYGo(+HxYlhKs2I9tOVt3 z0zU7J>@+kgV|Yp_WG<6GZ(SRH?K+0km-W(Hd~t8NQ;*;K+hv~dzPEpVctlw@TDpoXzM2qz3K?4Gkve^ zXc>5{(&|p8!W$l|w?=>wXT7)mx-Ra;0PVpy44dzd!F(#Ohq7-$LOSXA0?=aBLhKg- zDyFNz$t9IY(A9wlEq2_d=L%H-p2P^HKaNKL&j=EdOgBAR!9Wk;hVD86DfY0u| zDBf(mTKrJD&pSs4nd15!HBzhX3Q;jUk}yB2!f7clX|#!^rwCUqh#bIRw7|OzFaNOb1mOz%Y7oI%yB`D#gLO=d!YD+ zkbBD}T81jVX@x|4A0-n>&ATofUJw61)o|{@`_oeak6p_?0PGdU^?!@RJ!i0fO!z~8 z3AuI9CSP5q8zhOkp>8M02KSGHNjc#pY%4~Os&wio_U66Ltlyx9p3h`^onYWDaO3iTvsHght7gTD-`=_VV zEbcB>Q6@zJHq+L=0Rc%&p}iVZpuN**PayZje$D##8QnJ@rWc{om?fSQwOw+2)y3}u zm8le6E>3us=y@Tri)4RrzP(ox5fh91h%Oli?j5?rc2+^A!gj4pGMC*Qd`_7=p(&a> zO;9*8(4$%x>1879AWr>#gVVQ0(9ZAAqntNYU(a;AXwdijV)h~C)6YJJjhB|@%29n4 z5*}Cxk_aHoZhAIe=ehFc^&$Q4kEhlr>Q*=ni}>5*$G93&S5G$^nNpyL|8N>r_yFMb zqM)xnGJ11c8Sj1e(FxN8+W*Kg$YX(`?d}*IAdrQPc*Jj^bZt~MVPc)j+%~>Odw0nL9_of|4 zo|t>2!6#SR`%`liV!x30r;IA!m!8P1!#i%i+6$PWszQo{je>CH+()yN!YVMq$G_Bk zIRMKq-i}Pq1+!QMdRE{TAssr7qZee`rDoB^3gdO1xs~FBe3}h`TxAWG9B3Q7r)hMv zYg&u^HzoV+DG7no#p(RcUBjIGbrN%yN5Z=ol94qg@ZTvxpkXcT$; z=ZOmO4o=hTW9a_!eGA1fe;qJX1ed-BJZPjieeVbHh2t}A%SU;(1tbYxir?byA7W(^ zczN;|jpRu@5p-r8W81+h4-?4RQX3#M;j=Ylpoch%grj*K;6koYo*rqoyW`dRdU>dX zvbU}Hw#q~$%x!E(n@f#c(iz5r$ zjI6YNX*^wcyi}**LeEf?_DaQrR59KFzBgyB`oP>zKir1Wd7cVbj^SWl!{*FQIG^BB zR^)@+X*UZKsuFeg~$miI|n1({`JuHGr~9=LlkM9I@O>rLN?JHGq^_Y+3` z-h+}DKQG}oLt+0U(3jL)nJrBpt z)+2FIs0D5rYLL7ec%a5XQ)kOCE+(IY7npen+c^0}5zX_QpL(H0d1V3F4qdB}?L!Az zQZ_PQ>VlCE8mO7kX)XsJm-TG@=c`QDS^LSpsH>d2y$% zu8KvToR{{3b7b}%2!pa$&CKg~L{4f7$wsHDN+u+){jszu2*Se6&PSzD1+ zC;2SGR*9`BBDc7)Bsy9C-Bf`vYBM8B?;rSa-equ)*q!@GD#x1Z4)Dwv(trMSN&J97 z>?Yn>%f{)8(r)K|7Brz 
zlmsGP*%@#B?xx(3k!Z_TjYeC%MQ2mojgR95_ng^77D@hsM+ya|CkX5qaeXsgc*BzE(%>jr;tEdrJvh%=dBcet^Ynugwr_1+@54w|3hODA>=8 zxAIVwBUdeW1^xK#SyeOIUHgkp>%(?H%_SY4#=(E^dbEiWydQ!>hj(%gXZ<64vhuAk zr}vY5S$3AZ6!z10V}`L%H#ub<9(*By7qMiM$a>ziXXp2C!7HstEGusk)zpsGR>b!R zxV=%-mvN1|fkUF>Pdc+STsJhV6Zfd^O|61MrW1i*I>niOXy?YxP3v__XR{g%ZkGQN z*W2_9wm7t(_WXV zQc>DS^*4MEyx=O#?iws3&Q-K*;-9GOeN!1GGJ9)CC(1VN;~RWj#|yj?-}G{Rcv;3hc!C#7|CF5;rIoq%I)>k`M*FK?nb^-j?uNrs z;qPmfv;24o!*DJV6Ee{Ci*zT%?mgokZ#(<0WYPw+RT-K@ryf9yMrNXgcNLw@U4T_D zS~wFkhWg8%vDqX|a|B)eTSP4}DymgO^;oF=SabnS@*+DsK%Z9pZ|V1r3m=AHdVhMx z2_dF>gZ9Up@{&FM2+0cx3!H0OtSl@l_b#1}{Vx`WyC>AQUp$d(id9m4FSojcq zMUR$wmxVs4{u04{$>wbKC6xm^bpKQGiyN=|eww^NFLPO+-n zno7hHlRXAS&rvf6FSQV6c-i((-IXe~d+}1* z{6V-`@*!)Y{HppGrle;?C%fqP1lwsDUPWdrwS@{s+~rBlGVo+fSPs&>KdVs2hIQ!)BD zqn&s8eHpn3*}2VM;gsV`ETe7vA#^l2mXQ`2&(^=P9UU49eUH zUf&uTJP{o*{_7H{Vo=)3sS_a5c619~9*BGhHYC;%@|BSz-z^q5(_wyay=~Ee^Jutx z3D>^GYi!(!HjP<%u~2)yeMZ>B*)Ux)Qno-N|H;!15iF(kh&{*2Ckr|08|qbT4fn#U zoCBO44VeE)`*-Nc=EZVndf!RyOy3T}%Wv(_TH zmjaYP=3N)0BAxrj@9YDcr0$f_)u(zi;T=nQSChzHHZa~q*H#(CV`eWEIlFp^JbK(L z1i|ea>41qKBPA(VgiE>6ZaV`se`g6)fpap7Z?4LY#|=LDsn0WZ#CqIBD4@8mkdj@m z;<|}RAJUeJt$tgWV<|dgP{A0Zv|eWr?=vK!88m`VI!kR8p~KyD)H>O4H!ZC%(?)`F zuuQ>dWqJQ*#aLo##lbKZ!U1dI_^LV;Kb2CmzHgwDT{rhZ@XOZyO%+9+k(Ho%eRn|@ z;IKvxu5J4EX%UT4jb`bptaO`v&(s`No`69Jsu@}QT}KYu%wSkP#o+lHFnHYjC)r!g zEsfXzO+*(x-IN|^G85jUaV*1QAQN7GPBq16UhJ+b;tuetFqUgAPV&2yX5`GX@8ZFq_()!_a*r$Ji9HP4!o{Az1>OY5WA zn@cewOrqrC8jsP$A`CZ{S1{#?fM0u879!hfE&1ChFa4BB2;5`Ai=pQ3x*9u$#(VPV zQm?O-GvYE2<8KEVzqvE+ui(l-`*X5Ehu6Z9K$pouKZN<87l3$+SYZk)^?0gr4~zkJ z%!e+`jmm#f*5L$E#?x@8OCsw$;*b{eZonPWvrl5NGR`37tAJ<3bFZ&g^QY~qAGkiX zJVTIvx3wAYq<4_ED#B~G$j5Ly8$$?N#C!JBLz8&d90&V}r|%4T$3)-Pee2J>S-`W) zkGvk6#&rV;c^6MMyA`$3HFQ*!Vlw;Im4z+^$bX@U;C5(vv7+$*)JjJHdNz^C(01)VkxtTN@DSRLA z*}hFk@tqk{7O1$q?-U463K#I_Pxj8~^ckFClvo^b$tk4V)NWBVa(U*xao_i@i}>r5 z%Q6*NUq{puBlIlB=ds7NY&8dC)H}M}-!dS+#d%R)l(?|u%J)yHIR3Dg#~IfKl6xOV z#40g&xa||Q|6`Vp<*k*QXrupGC9lwWz>5AZY=MM}djt^VXXSG&BHr3CNQ1kuH0}@h 
zMOIY)YR!S{_j!sgsiJ;_=0+NJ_vfhftzYskCGCNc4@QEL?$W>2{%S zb;G2W5#euG^>lY4&^V;WL=2o!ujT2l@4@nH1kPi!-D?hEYOP*7PMG=bk) z%m8eA4f!pS3zWI42RoE@YJe<41eKcpn{M6~-u{l!kX;&@I91+Tyx`u`HW*5`koRu_Fd3_{W zvLszBi$F}dl`B5Ud|0K{u{dz@QM+4CwSfBu>40Z$j@O4fHC@I&1tAIg5qQJvd^97M zLfJVfid6WuR=!D|n|Ogi>Gubk$|2<(=+F{fl_HxJD5r`hdFJsDoFL~&!XnkQle0#x zG4|VA=i>kha!OLxh>Dg@-)J*4R^bG7+w# z$fPK2uZhZ-Rt+(%QQ|{WapMtlfy5`WS!w)X@<*))#n`y9nKLpzMGUJ_++n|3_R(^_ z^;bCI_yo;CPm(;Jf&nO6XH=4Ts0a{Ey479(Diwta?I-;Jb6Ts;7)i4b#k;anvDwt#Zyf>9n4S0O|8E@h-S%>KkH>W#insVDbK{&P|!lqi$FV9?zZ0O(%uhR}bvO z)sGZ$Qc8tm`7BOdx|Y%DLNUUYZw1k^&>WL9ZuIoIcJOrPp-}!Y;bxQ_lrZADvo(f$ z;1xrNNt^{b2mY6|ago!R)lw5IL(#CW##qMvEpr~l9z{Vx$D1hk7XGpA?u;P>myhOQ zD`f|8Y5f?7;?0|rqyc=Oo@i=hDC1?AB!PD;eI z_5iHZ4cXiBBnJB$-k${qqzUZ6D#zYYqtDWwC4(Me^tg+&THFnu-IVB`nJhJOMSIu< z@KM2sNsAfxS-X6+4r|WsFWn)Wc4be*nivXkbOk6kxN$wk&V3BDt8hgimynJqg~g1xi$5M?k!S4ahClN9Ezn!jjxxEYV}DFhfJBNtX3RMN)086#5sq(^ zPVaZS>8l7YDg2_y$oy|HZV9`>$=pmBIMr<`nSKIE|2in|p*`@U~`_n2-Jpljhy7q>8kaO8cv=hb`=NvA@HoW>22@VxUKn_M8d0G{*k<`xzOjR%R>u1<0D0UuJm5j(?;2AIv ze%$xMXgcKSutMQ?@%q#iRaNssos6o7i9UfUet}U#2BGHNk;&oMg{NhaF~0UX()0S{ zq2iTWK_d~&)deQb0tBfRVG)G+K)qwh||Zg<&snti)9 zdnbK>S7dF!f&8zh2b6If)nhWqxtRlh*TWCiQ12!x9x2rES$WGUn>UAS@&87 z?B4<|4X&!b`yq^Qk3Zc727s-DoV!o>(c&z+Hy$?ch4-7ECdkhG@L=R}v@PEVGN4R^ zB>K4V4$#*&*lAq-?A?7e-}BUIkCEinvXW%U4wpxlg3KMxBt;BZOdULo@P}{C#f(>E zq>ar*jA=7dGG8?giyf_ttq)T;-~_wqZNS(l2V@gBNE8jy znIZwvbAY{JeOzWowB-0NQH%uS``sFQ!pK*FzSaI&8lSM?plzFMXwo4Cnv-!kY)hBf z6+cRoWq2#h$e;naLv}h>_s)OflbZ?}UJkp4;ki&qm#fFxg^UfoZ1;5oeMsyQ##7u? 
ztDYsq7oGE>$tK}mgUr8F@CPv|>CM8=3NQIB?W?LnuCM|z^@%WCDSx!KKyY#G zGpa$i$7ttOpG1*<9lhMekVIc&iGdI!e_@n7cM#iTp1&@gFePUwaQLKIT>OrgA2%G5 zJbjDdOL~IIykm`~6_R>$xKG{fNYU)GkHPZekC|1fTxU}z@?zNE#7b>Fzw$CN3ul-| zXThkd+W>R0s@qWe!r_O|xKiOEYS>b7E#CK9HZ*jGRI4(R;QVxxuXI_w#9Avq65g zd)A;+{*N^%0Z2_0_hPr}De5xPt@(t8mF}IKT0Xqj3!`3kR?vfEYC;F-qW)I%|GOAZ z{^5D#%ppl#3Eq>nE7^L)CPU%KSAyVA?&jGPWvz{u zM@|(oW<)BceB{@VZ2>z6l@ZM$$|!K+c&C$JJiE6k$HadET^J}=u1k@Vc_?R5hWsBD z;V;iCplcre2#e5s;0y>8pI@1gkGWyhOqcE%!bDQJ0JxE-U8lt&zZ|ooS3Pgs(Z>@I zmTm?UDbL(+coRO#_&jR0eJPkV+*a?Y23bdj!(_VB=DY^hw`z#VOk?eYL-DCWZaL@V z9yV|+e7Tq(-YzhweYE0Xwpz)rH=l;hPP5T6uARAH9uv7&G;c^#t;c;UE}eKQKA7Vb z&@k^jEMnVL2*qxJGCH5rGYvQQJO<9|@pwAx8D#d3w*Y69Oy12pSejuho;6>&xqlb= zYB(V9S<-fWrZQIn0GzG4k?@zLTrW05cTLo}(wxS_7Dk#|)^gNpy);$oBnsuK zuW45$^~zf0p83&K?#vgLM!on>*1bLFppyx)B2zb7x`bO}^u2=u1X8ze9Lj5pc~I%% zfDLf{W79uQ^J$FkZI&kU5N-gQTwfgjA&eAZ$_#`JoFXcCW>&OJylIn~2lpW*u7-Fdx+yb#(d)!EDGeOWbx zoybjdwWE~ROJR{o&i$)KEBSF6D~oh4(`^l@{+(8fqjSKbIh@AB?T@*!{FE!W?iw+C z?=p?|SG7T!=+kOuDA5WZbOchMyoaUu&QmT{=9!F#XHkxP#iT=en;s*(YSBR#tHW|5 zbP5oPaB&?oLR)=Sb*kr$n$ir;Fi0<-8UQiF|6N>lXMtG6%HUF1vaEUi+ofrHYl5g& z(8ySOxs2(<(jWgg$mgXzw~hn*@v=MeV7yR^g5$&j!5Z zO{kEsn6O~zAh?zJgYH@e{5JzG(TmUpr3X6){}Jh;e(rPPDMXEWiX;D zIbI9t)2ghnan*X-{Q9c(ollC$p0p5u-*mPg_a;;Onz?iA%EK$mYdI=@YZvUlx$G%5 z))R?(&s+sV|8flOGvMiGr+@oh#L_$3O&Lvo4}GFN7mmQqPJLs9Qg^Eues|>~sEUMo zZNux}Ux4dfCzkO*GqB(L`0+y?Um9W!;#d1utpI=53f262;=&l;I4%z{*jW=UCfr)9v?A7t_bSFR0uS7W@krMI-0Ac~Waxw^&}ckU3sVAgQE*zJT7bAw!eH$B1YV`A;Nj8QKq zuWQqs`pJgtX=!k~_ZNLneA`=S(y2SB3W#lMY%$?F?bWC5+~6g%^DGFaAE4&K3$s{^ z>krWd2^OWPOELn{QUEAlD1P$XtvK*{1M~g|i9Vfo(oYRZg-9NjJ-naXkm5~*GY~<8 zrqM@2U?QC7L>wcqyKUP?cp3(PMT(A|VIIo=m+-?ETfPiIg@Pa%qMwrd!`DHg+<_$8 zf`ow}tT}x$$a`CJ;BWu1wftSltgj*CyOwBp$=T<{SY z>|^B)sC^67`%nIzbu|*b()!wlRIXioFAa{*uWeu?jw|F{koD=xkrVc{;XkeVE&pqIUU{y^oLgS4w~WWOvl0>^;lrjf>g7!Z}`p2A|sN?_p@W)%%{R z@HxaSh8~o>kl!P{era)&V^Hae>-aAN(le;`?|T*w$fegAD7Hgdx;527LO zPX7{6kfUDeke2X;RgAQV*6Ug&MA6al|77Ivbg|#)-uHsX=38J&V#z$exET_=}KU 
zF^o3SBClsPPBsK|Q$GFtR24oSHp)bgisVeL!D7GKr?S(EH1@{*X-zN**i$tltH)IA zpT?hCbZ2id{{%Dwx_t{_HGatXL)e{rq>|Vs0qW~Ej7>3x{amqj5eCC^s_C{$dq~%N zKjh3@ENn1PpkT2OE}_P9S|C6AcE{eyDBgek*l0q4qI|-BkegzY*U(6^Y`j?PBJCkR zNH1yZ_4{(ZMtk7%fR_bV70Qm6^Y|I-STM;@dD?~u>h?M%FKLkL@ClK^WUa> z)$4MO2F~1&Tdwh6nCth=VU`o@ug6+~UDET%bk5FaF#`KVcbPn1GzCz&&;!8qJ9!Ke zgyNxCDwCL^$uWhtieVWf?gpy%r@}F2;;K22eyAh)nWNHfqI3)#SA0I&&0li#`{I{m928Zz83(Ae<}is(^5T-UKIkR2v@&1R3Ecgr$oC zaZ`NqZD2ndPq18xH~ashad9>vBJ&^%A$jKm#Y6kt@*8zYAs2f#t<}<+EG>*91SBzf z*A7zZFGmZ$+YZw-16sri?Q0<7Zv?h!iC*~s z6;?(hcoL~|OZi)?w*RhNMi)ojPiMAf!cl8AnX{;*IpU~qWm(iphrV`TMGo4fSoNn~ zV)|87nO>}r*DE!U;au1K2Ot@cz*BXcba1TSF1htf%e9bze+B>mtWgwDKO0*iD zZ$l_q3EW$)UML(xoH^FWU#6SNbTyF}d}N1E*E$q4o{=ckVEPEc79##+J=tH$>ZjCQ z+}FdXUc_H;pWsOkLM0klvcODmeC+?2iI4{B={wL0xn{~>qwKxsVqvDAy^clG?&0j1 zz`Q>aM9MiCk^d5G zte@XMG+~RBt)Ra!qZZ?Oc-ErRHlfwgne}S^k0##Lhe^M>a!lrvTG(#z^}B|Nvp5_q zJ}imukNWp8_zOHwhQ`o=R=$(=$xhEQAw#7*41ykdHMPVu>YiW!D|sDYMcK;5qYEn+ z>UZu#cl8+x_66D1n{-1C3oo0fzgK)987hN?r+pJQbLT9~1>mu0gRhoj1~f&Yp;!v9U ze0zCU_g*rOoZRQpO{W+}g3k#x)?tIxw*>V>MzK96PycKKR-~thK3N*csDQE;y)->g z2vel&mm=r>O(jT%IOt|@DVpSm(Id>HPhLt#AK;E1 z(IqAwL=?1-+ILAM-Yq9PsMm%bP95JNSi~~*B0Q}AX!d|CGWljUh^EMgO3Wc(~Hphbu4D0f*90&J%$}8^Z`X&1nFL)AOC$jG@TO| zBcd*|-H?PQL+F8M|Nb&aO4RMAg}g>P7+0kJ)OM9Q9qYtR_Qsvjq?GmJ&S(TSNer|( z&(#hsaeU=~$Ppb`8z6)(6n>_XpQzVn_ECWk+l-!0`0)gjwe_G+3Rq&coI z43@731F~wi>@%+os$Ak9Ow`fzyD|IGe}J1qR&763FQV;wVZ1ZxVzg58)FzN_TjCzL z+lo(7mk;854y`V5AxWQ-s1@5!%0c*Jh5uz}H3`Uiw}^>p1A98dV7Si@@zSd&GRXa+ zIR)vr(5+&aCHBV0B8%L*T<<1EYRn4T7*_6Z_OrBJ&d2%|gZe{cnS&W#j}mTA-pp5K zt5(^G*lI$3RA8e`FZ`fJ@ey}`xALDmeQ$Zsujtfd=WY67$id0;dK9fg(3R+jJ6k8* z;O99&pRhv+Sly3+UovFIaz>ie`Xo`G zrrsMG@k~LA#o6T|!-B`nz52KB_&hq%d=N?`7wK0rAm5DWLp5R>c?Jd zfBQP|2A>AjrY<1#IgiU^uRa4=%9)ULT<@|QTofHqJKgeVj>CCLXB5nUiTgoR?0*) zKQ;V`;6Ko6Lx5;e%$Y~K2*sz-nknor%SFt$YmXmtA)p7L)!0GYXd>;c=PMKuPF8Zf2%xfvJwsA17p|sfUSI-)ceunL#l9fi+BC2e? 
zP@v2c-CRfo8-C_#MOD5HSQm>4=b7^BIe3kt=eOoC=T?GXE6Z|Y$lB5bsY9P)oYG%H zXUDSsyV8gxv=)RsHe$Kw2Os^M4_!&bKpVwcy!qr~11ELe^ToR*O;Fl2B3*^*I~mwi zm`%$Z8vPF3ZXAg0mn2!ezmfJK_yi>A_gtf=;4f{Nwrw7YG}~1bF0;l?Q?ua9>cxzW zODDgaxkOU%_T6lotL1y?5$E|%HK%bjxq-XRkH=ypVV1*rZo|F8_vc+*4AvrKs7(%Q z@!ouhzOG^G#ars}+JjmilC}%^r?XBC{!ju3!A!YhmRRP!q$UxLuh*n5w5F>00k(?2 zzvgewOyR@CzF!Wc4m{Cz=C`(htv_%xdMtjN(|T|5z6t0ubcgc5deJI6a-e&BpTS?^ zp?~$sAkDXGl*=N;9QTPe!J95D=nCP~o*_u_zswPuDRw6|5Y+l#e)S^M9BQ2~=9}^F0bX#oh>ucM zgDa@ozjENGBA81`1`-W6S{6fCzS^urff=Eg(^Pw#f|R%4LRb(Sht%~!U|U(S>$}t> z_pK3&_QYWD#IMh<^#K@r^E904?{<-P0hhVqetp9)^F@R@4k?!(#ns2uGb`hTM09Liz z<_D0}vzF(kf1KlsK6#SeTwe6Oah*ii$~FAGA?JuTQb=7xvRJ&+Ede9{JxA-$x1%c? zHB~E{m$UG7ZBDy>%SwLm6+;YTw#uBzRF9*G#_F<0Zk=Ebyb^3K(kb`n)Sqn6#pZ`V z^QEbWp@G}EQDfqgv*Kf6`R03(LT=bgH<#IKQd42QaV0a1(g7~Qxvi8VB4KL%IA^Mw z-}j5@VwuuWMg0Lm-q#b;*FyjoBi6Ub#F}gB!*696&dt;iYb+Nx`KVk-R|{x=KtJiD zuQPxwc$VXzOi<`fMLxXH=f$F2&5AylC*Tc0(hj34bm4IwpWLe9>%X{0#7?s3O$@)k zHgYn~HKu>!k;~|ZxLLJ>FRd~lskacyCHi*K7mykS17tQSf|6@OIk7&eKG!Fljs>WOFUEf9X_PZ9UM8W&KxG7Htl|W#=iywwM;mCQ|#QXc8mkzbu@#gkzlE>MBA=XHY z{}P6O$d!5orQOKw$C&M}$ZNE(c4hZ97tJ(sPg{_YBAr8~plaSap;xxfp11Ih1*wd^ zB;uX+hE>(jf1aVcvXW}|e%Qd;n5P}D{QPUk>L`XuhN2X&(Y4;Rb7U|UZGU8!;X1gG z)OtqKN2_1lDJRD)m)|P1so>=)X+%k;wIS`poCRAeOIu9$8$6v^YMLIW&F0^5!|pND zr|h4Oy=bXvUOlw4X1pNcaRu?=;T=MK5%^u;a_}TF0|+HjDX_ASp93WfHU(8}yJkHK z76}(m;yk?F1x=E-j|mTNJpMJkJ>Q+^JcSN2s9)jxy`K#}^QS^X2tiI<`Lyfvh~e{L z@)zefF*z$Gs;=aw5j~6Q%0g&)9<52p^l#4Ch&LRU*;TAukhAh-DQu9J8M^&p^%OIsh%Fa`yqe8yF z)p*o#m=d`2Fl~P8evm&gJ?_ML$Q+6}#{jK5s}rHaei`^vv<;N7Z9XBT)MG9I345R` zEs3HT?R48MP!d1w+|U06^>K1G01P}hT>sXZrC`72R;!w+}5 zxzu-Y5x?R|J=U3|4vh8$#O15))vAQmqCnmB`EZAhyOtS!vyC*F+eH|Cm@5+_mdN1% z36wSwYz63^Of5&B;m6NqwC#`9-)`|f@zTLGIezxOtEaVU4;f|1(6aBX&|ER}sBgy) zHN@WeqUQU1>1m%BYxO305=Q!!Z!{H|n-3WhEX}7fuG!36z|)vY!fKV7YcK=q8|3VU z8)kraq88K%V&iU@ym1QI6pS*lqecgc0qDhV7^Lihx(dRsLo#-GvKJB``JrQW6 z22)_1e^J}Zp)@-yDh5)%DKxl$J^fx%wSQ9oLwbg?y| zfSmWyWaFY5oCPhQPFrBdEG>o&rxxT%DmE+yORv(f8fGjvhY#+%+~zl#U-rT4#isa` 
zJ)UW^kzDt2u|I*Y*pglmYq9_UuiPQl(DAH75BFT;x#v@95B8eKLt87?AGHx#J^z~_ zFWo0fZ>jH1}^741Me8CW|~FY`f) zgll&c_-}yK3`32sk7tY)AD(p?yA~rqJil6)T@+vLf`hTI8_KfRam|Z?4ORjd^ab?GL zH{N=Dn>>S8O&3LhW{rB=_Kd+Jh!TFK7U%JE)d4Xnc^xdei4QOT3`?&v4N>GH zoT4nlCTD&E$`(m!P#Tx?4CrQX>Se^F5u(pwH__+Fbdszh=|438m}DAZ?A13OU=JWB zFW#Z#+9cidFJvS6I|RSOnneqOw$<}f-1HPB)6QjiEzaQh>{4xYR-s!J-}iUJ-;|y3 zTYgvqt)%UENm+@db`U$lMYhI3d$=Io8DxqGS1R^%)rej|f!xSWSGiEHUXzEkReaSI zyjMca@xS@Ixwsy=g-TZ2jBIsESE25*huCZ$&Pz1mWo=*|v?icc44BVw!JqEL%Wao3 zCNEIJe*jt6K+_B_>b5~}S8WVA;j0I^E26;M?u%1Mmg7dQo$GqCQ*KnznqSLsqWr{y z1j|kVs}DhM_#xEhHng3mt9ItMx{HDD0|M;Ifx4gSS=#Z_y%g{cFOKW=TCR4=XiRFU z;UmVl zNMmJqyjH%~aA@p$*kR7;yYX{)3G8OMMi6`Hd^*L1u8$d{g0Put(=0ltHj(qPUh!}Oi9S&z-y4zot$En zgz=37l@$*216ciZZ=q*Fu3RWdXW&bcIY{M+ zaG$!IKcr>QFR*tsai&iAh5Tj8K+rCl69I7d&N=&FPNAtlh*&d#X9OAH^KJ%-@SKtD zo@VC@`A~Z6ofR;6&c7vkP}1g=|IQ+xS$cNSzzuf&-wpo6qyhK0vw_MdON!~8YvNxD zz&AR@OhRcplY6OXz5o;)?AZG_2fu`eu9B6!8;Vs!;>Of%3L-t}H7c|dl*Xw!P3{Kk zToZC_rGejOb)REjLLHx@7xU&y?a-v)r&+Xpb=^xmXz6f{Gt^v$pxYyEVdL<(L zLG8n+t+x*?%wK>X#^G?-o^!9It5ivE&@H&zRua=w1>pa7rlAOvSc$47+$eOSgO+fD zd|k7W;oW2AiA0Ik=eR6GA}~2Er%Fm!3Fg7Ox?onR9-TlYGXNn2cQt3 z@pqhZTtx=y`KhGHL~`6g$fzOjI{b)NMTx$${uGih3rFph$ALP@qfiR6+f}(8`*`(+ zl0fMk)FjcSJy@a~gaGd1^+};(@lj>wArZ7n8HQy!8oX@bopd?})k_eXZR;4zMg*FA zS%bE76##sR850t%87NL=9SYu&`ZQS0yfG$wJYM5?arfkL0k2;eyc#N$X3qF|gA*dOz@NdU;3-`OJ?YLG3&%sbO7WqxwE-+~QLp=*e zVC4DD_qWk*$96EoJ#NqShbb_VP!K-}Vtxju^toO@pG*|bW*UADD(Uf&X{_Znt-x_8 zkvS|Ca`{ki&)5f5*oiJtAhlK*lRZvukmva0xPKG64Pq_$2Q$k+%BvY!%|Q%57nfxTrMS%$;T*dz?I!H07ia%PD*{23hO|vLO=uKz)oTSct z@<>4i?_JaM*_3kZ(pj8}6le2ATV=YYWgJeB^7xW!`K=_@ z3kI!H;`7w_hN&-tY4&>%_}yF9ETFI3w0?I`W;gDte+fF)1=OYf5p0s%&d#UmM=Kb0 zy@@rs=}IUU6YuH+dTp-@a#2dI3ce z{Vr-7Re*F6UiZ_5(I5!G2g5JvJVU-3kDfw7e5V^yV#I9-bo-T#uoQbiEGQ~J_FF9b z^Ce^_YrYO^#rA;`5bD;dj%(n{*CcduSp0KWJFSplRo)1;KhAMISXFOLFxrX z)>U5$z62~Vm!+^8l8zBhnpgImqYU zB1k#aKs)|joDmA#z%f$GRV&N8_eB|<(QK$Kujm%8Anvez)yKx&;IFCTKcp-X2`aJB zykoj#lySL0iT3y67BY8(kEY5mk1gU;xAKsU==2o!aniZt4u{`6z_wS+oELVy581T_ 
zi-u#z!uIKdC$uzHOG%y__zkd8pWqji_>x48_qqpG2IS~XCQJvULEMeh)`HvMdiXcM zZw_~20J;Q?ef*4G((QJ&^~5^eF84E)%kfe7gU1zM`I;{#X=LfN`MtfZVsSd_DjaXI zwJ5FVENH9VRV^25DKUw<_~($9#D+@ivD3)lZw2@rRTzYkVGrA1t~)O@_8p7YJ`DKYB zEsPT4YkFo03Hu6?ttYUwsDURxiQ)sN@po+qdmQ2{&sc~rtW25ObL@mo_5mFBtB16W zd~$ycaBu$n5*GQ4Eb7J@P$9QRLSU8h`_F@;yTlXM?;4TV3Ou>`l7%QYP*5T=@D!j2 zZw^E1byh&jmkXenABAH9{$$h?m5Q;PN}|Hq07P@L;drdL8-Y&g{|d!1^xpnX&O2X_ z8*ww=Qc}!#M1@qwqx%eZ@mEWY`Xb;tjf5*$@lA5G&a}$7mDccPy9*F5a8RNBQ=pla z>B=51#-6cZWL|u*s;R-LFV`&ly5f_o;P*@L3H6M8eg)AKMg6GKT`pna=M*=}Fh%fIQ)WJE?E;#4!r2b_d@Z zT*HgsK$qDTgVwRpOvD;Pyml#o`mu2F@c%PDjnSKE>I9zxTFN$``-Ts(rc6IH{W~(n z%5Tk3j&hw{6Ztyp6kP*6O33q4C@l9mFnaBK4)X@zFg<85+c zl~6eP;k+og!s6kEZiV*8yKZ_65(aB<^S;9u)7RY!XFr&_?xaB+p4#_x)ZQA+5!)lS z`2MGQJp4_uFLA0zXz!X1qKQp%q5`y3koM3;k;;N0T5zdqEDGfC#BC?>G6$9)%1 z@wk){gMx44JtISQHBZA)JIwHi4E@om`0-zf*hdGq&vW2n%4*o?qQ&a-Yj2~+2mW54 zuK%8d?T_sK6xKVJ8B_4kRp)YpA4y!!l0+`>q`~(msDuBr0b8H8#Zwvyo{~wZ5LXw3 zS>#?+{$6&~;coDL4cnaso0ZhGJQX4w3QY~=h+iFf=PPtVV55&HGi3jHCqsYE?F@zu zfH2z89+L}S_fDZj;v;XDhD_RG8y$SBa-}txX>p|x0^Af8l(3#HjZc3G3U2ekTZ;x4 z{(pB^QRKH;9r3 zPMK&qVvRcKC)BvKFIo$#q5p_{Zr0yE=fGX;8OLJ}A&V%Z6{FRjY9E1Q?EfbiB#r10 zk4cp>*FBo!G+eR}6sGZB9m;=xJ?|SDG!t~Mtx9h>sgfl?a-S+#bim0`V}khzjM>Xe zv{3P@klu)CgEJ6SR@^H25AG8 z)=55*mwNb$Qd_k`naG!8@BbgN-UJ-#{fi&Bgi2^`*OCmkp)?5Le9M{daqB9(zCp0n6rP~iHMGNH93D@DN=v8_O9%+H$QYPeqLGs z6kE(Zy30E^&DeBlvSL-#zk0YLHMJ=BMf-|bqJdY_<&jviGSZiuoV=pk%@HGUK8^fp6S;ns`&CBD>Mq1 z=6*(7$1r&h|&b-5NfXMHK2kH9$+%U+k{MkjU?A5pWZ9kFC>dUCN<7pag;-jt)Q z$>H#Xj4CPIDbeG#t^DWP2PNT4whfKFwsF*hWHQgF$}=fFzX)gc#sh(8I(}AcBiR#g zy7~S3f1iQ+we(eq%;#e6<*6S%S?v6S0pD)XiynK39vK%@CytA>_a1%6Y}z5BM@h=& zW8zfb)ja4skUOxl$41V}JOEvGJz!tkEv|r6X~$n;aphC*TDJ}Js7u4)6!7)=k?ZhD z7W@75h!MdPo_*{cY~>-ymmSOq)T^VoQ*RGk2ggvtBYuBx-iy|EY$USyR@SW&3b1_ z;?UDb?^AenDcs)#hpOEtsYba2+I{TPkFQB$iX}6jN0a;LHpRHEJvL(4*-G-GTarwQ zl#P2Wx!i$xQ_3SNri9Z(Mi==N@J}YUPrV_fizMBOE%Er-_=xgVM!pv71=M0y+~sW=oEG=c`}GJn#3+Epfe)y%QQu^; zi?i;T-^fcEYk+gT!`WXm>EokNpI*->-q&^Z*Hzbrhe~hv6^Kv9p&JL_=*3rEOc4tO 
z{d);8+7tYwYqvCqRk5Vv?m5n?LtvZ=&d8w>$;|x$7Z8us^sy6e3`yfoX?%YH&IxcK zMo8)L`JKm;*b_5PGeU0EIqoX=01N%L3pTDC`NGh2`ETO&B**j*LSB}8s=iG7{Cxal zKot&nHLG>5_;pR9<4kUA>K=ETq1!z|(_zM4CO_ZZC7pV?J`~0G>NwP)G@>}cc`mXY5_jp>|wE(+x7bSYq<`VNSeGDaOU}+C2Tg~3oJtE;$PPs=u(xWOIFQa}Tl*z^y-;yq#g&ho0V>-m`iL zW2tgpQm=zAh=_0b5O*(gmi3*<*6o)5zbG~q^LZep)W5@Tx)NC9uf>sGWrnq~eU(@I<%k28GDJ8w_XGqF42FN)PJ{op6NY9zxEX;f2sI)smc zKodWl>Mau61_s!DC6YQVh@U8;o|gc;9y}-iy;MFZ`+t1371Ws%A?AR0hpOvQSKSsS zl-}Nd(HPYA{FoDi1PPt+KsL?F!fFOL(+%(EnEdkD@xQwOCwovKu{Q}#3G~DgoZ|Um zG1PGAU$GHn(l*v3fH0l}5DvB=Ms%G549lmtgXbnV+S}&tN#%@+qfj9b=pC`FkepH9 zs4XM^j-!8nR}m6zI{3z~dwm(4?N-X~bj{=Zdi#CmPE3_KfnjbiuHTWHY{~QT%D-MB zEStXU15Ud!+RJ3)qwZjopiq7LDZz8^0YW#|)RFjo!eq;d6sG{17FP0V)3{n%YR#+<6a+meD+17s#`#^ zw8yRq(|qO;2zigJ#r)`VZ8D4EmR0w)B&UurqM6(Is5~5nfRQd7YT-{Yamwn4PsyVJ zh2%#M(EF9N@yy%G#|0F?CNm!q|8UpuM~!k_q9o0&Ay6LqAC!@tBFfBEZQ$K2HRY8n zSkm#q#hxt4vd@V6^&nZ=okDVKF! zV=A1p>`u%3b>8(nkQz1!MMduo3D#?f-o-m)Rr&>pc=GC2H8_S;gXN`5q!sng(x_v% zkzYMzvMGitDUpAg!qk$3ah?();#=kO3YSuU6v|p^F3ehAN%rq31^oA zJ2>udw}?h?T$Y&RZ2Bz(Iegh##hg-8I%~jhUR9h!>L_w1#px|Hb@VtBt)OssDQ4xT z4&^hlnQ<46*}k4&&7cPp-hNg3-{#*Qof$h6YIjy3d()U0 zC8ftXeGKI@fMn|1en4?v{Q6%V&1ge1wdA@_Rg=-ZOdU7V1ZC{f`<9(QhdRdg&NPzh zk{AzU-l<-0RoSf4EdTYd$Sm^|JknxobWya!M1o~|9wR09)kM9eKmzv&mYx8m?abz% zs3=F>GZjIYu^n_{;D8s*^jCOU15EzK7KakfOm6Rb57PQGs;?o`5*4;bkAd=ZM7Xi@ zKt1d@vGy03E_jQA{5IXY5>6?-{v@rQTQZ)MQo$IrvumSt8|gwe&-9_=?=1zHw)TZ> zB7sR)eI|`!G;p%!1qbvf^+Ja=%b&a~7#5B)Et}T#n2H_>t<|cCBQ!lEFK=hnFopOs zH>1fC4+&uuG{*IpcKubxo<1~%O+~oen0Pbzayq&91@&7pb2nBCKgiQ7S*fMr=Sie* zS5-uqD|^{)k{6V~p@?*jeUTUY_UAU-ki48n?P%q{|J&s{zeBd;Ne+om*62f<$e$P9 zdg#aC{2BbIpGRt(%<`FvU`VBp(vyM(B8Au7XH^Z%TP$pro_5?@_G0AIQ<%H?Qufka zQ40bzYC_*J%Iks32poW(ZzujfBEq1B)v1j~+`N=L$9YU;G zX@FPHq<&k6lv7bd(3i%9ZDS@5J+o}#`N<{s{{KB>M2^~!{HuP4zP+)X1q?G205i5hmnp_ct;b5L)U~3NV~f`zY>X$Jcki;S0!rRq&bhoPnupD!`o$YG29p(&z<4LX{7N%CIj(T#G`o8rp`0^0xrV zmnU(~Up2_6Y|f#FSn;6!HgRZDzivW{-W>6s|I?}9BP?6SA{0wc3w+BqBepgFojIy< z9A=z2r>Jac@jpI!nJUi|QMM 
z4`nBr5o5SN&tS%`6Pm`rqsm{0=)>@PHwuu>aGv8_yXS>-ciGQz#sU0RuWllECmUrydW;`#);6>oX$F9Rttw=Q=|3 z*OC?~=_#tw-ok(WoNP0{2r9}6h`!<#p)qoDCih|5RWezE?U!R&wos*Ry@o!vw&i~a zz7G(}d>7OfY(6OEimMh?8Gk5#yzjbl;SS>TYm0&Fiw=UxmggUd2D-C**jFX3a*y@f zWw;%?@b>ObC+}I36>nBAlAsTLE;~G2`=qYhVKL3>t?G=jr>5_8LTTpg1$|X{ObybI zXmh%AEdPyHe@XpT$rRBu#$0x0kXRmCIZtcu1H%Zm#xn`}7w@Ani&{trqX;1S^4%=n zur@5*5=p{Z6Gf;ZUF?^00K-zjW=MjM)cL&uGh6V{XKc|y#U~a1HEQoj+I-5V!UD_^ z&21Ih@}Icmz}9NUAxVUt?~-s{9PnxL`I2Lre=`l#uL9sHly}|cIt^a_x<%K zCh5l*e=)B8K3ariv7||IBfCTkkjJf{)cFMT7CA#Qd;;@Dt8CIawDbCvXJbv?4hW0P zc*+?{Iz8GmyK2X2E+}AG9QtmxU*Mt6DeRX4dQcx)loZI>c9#r!l0rJb`T>PVIgd&> z7Lf7$b`NlPf&?1jXsc#KI$32azwX}>2t4Uak$ioq+}1GEF@^uqRZ;EV?I&m6K(3}q zqpB%yTT*xa_#q^WJ{@K}3PQ_0`ua9vpI9-eq5i-ti6gNOE#EaMeG}(C5>1B8r(=p& z7g2%e>>#cLLpLzZj`B%lZqMn#jMbdwQ*$R>Q^48xYS&js`~rMZ6f=Luk$Dh?xR^kA z&2ag7^f8yZ`v31RS3i8alW;!uV&lch7G|+i-o}1je&$DLtfuU@GQr*eZ+&C!9#3-Gr=K$y~diY0aE zTkXy5!Tmrp{5jj;f8_sc|8KvgvlRt8D|0Oc%3XRLh9&_)*U*#xmaH!qht%Q_fQe>t zXlj(Ph8}AKwf?FzeDKAwTE#+CiALS2a=cPYYrKIT4y7eS3Ulz0sP}*doZ}~!wkBQ< zsZS%|44xGG==SFyGx3v-f!SFbS=3?-?(!pT{63XKTd8qrko@RARB@}(DD_>5&dat> ze#53Kyap?+=SCHDFeP4>;!A0ek(T3@%Nft}YMm~A{bf@KdSusfYaHWso^nP*_>?~` zUVU+TIIm7ma{3D68JH;6T(=$tn%(vl5!OcpH+|;CP({jk(x~YDiysvthl(Y;jQMx| z7=93GQ;bkUUmV-Hc4(GoWN2%m%QnO)H@|(1$p4e2w?v&oGMLI!6mDbFg<`d~zQOB- zi%z`=ko7OHr%w$<3|#4!UDEXVl4n@0Vl8S)qsp<`K)gsO)2-hJI?^R!R3ZC@5!afQ}TYhy(`O!M60@Kb~b#%`53@KY1`DJ=nb{8p65iXwez?6+sHE;uTm={VJ2 z3h~je7Ps1;CpTlY(Ov{*a2iNe!p{GyB?hj_D# zQyx($Q%&`;eJHKD6*>u#4kRz{QUM`~^7``LmvXqjU%fHLt0R9#pz7%axyH4egn9*k zQj}5~YL#YBo>rN0FR9G)T6AM>ohiT7B-H+Nltla2KKFiMd1xsTY|(O4UTth)57AGb z`dz_SN#gz%6+#s$dmlV11Avw#BH=#Ag&Ng6olAFPxQd=l%q9PpWr7AJlqlNbMK4PF3ukxdp=FXRUKXc~v zoVP8lTK|9?W?mmF$mo3-JnDg37$v(nMxD8F1oLxS%?@VD5GF^+j$`dD-FrZ=kweki zc2`St-1&1J>@`W~V=b7mP4-_-NcOorr@1b6M9=Vh1Ja$UhCR7~oXEopzx*D#W)ubDu9a zD+7XyRu|c>gTDgDI)w4$3*`ltt4)CNDih%cQjK3>?Dj(8PI?S^$CHNwWrU1tCDdX$ z+~xPOh*Il{NB)(WzkEFGecjQEY|M0sf3O9gG6*#kMfEM#PLb-K%ru>C<(X4kImlQq 
z`;pfwt8$H)sP#EWz66e#G+#C@xcTNuolRV2d-urFi^>Ak|CNdTl^H+jZI`iH*#9kTwrUxvVcn7hl2xFm#9 zh?sN^Jk(tsIRZ_E17nBruV%#xg-3r{m)ms}mVX>GRMC?zDiQ)4Ne%u=MrVyEqO@cpyag&E#`n_E$IP zBHy6lw}DS|m=Hd(H0|i#kkehk$CFy>8aZ!uW`{m@#c;-dW7uw1FL@!`^O0vXo6b7SYnh_n`uh(5%ydi&)~}l|Si2Pd&a({s9H) z&IFq~k|yge9YOjS_k5pPLyB9`m6f}Ef>kAPC2F!NK}I>htEse6y3I1{x;TINTVdtE z3Z5fGx?$5#9eQolbiyyrlXuCLPEltJg-~S;?8>g$34}1tH7Exffwg4PRpL#5UZwyu z75y~L%4|#3cn?wb!~_K1OcYU{7$NGgCDb*zo{e>LSGmhB4>^1FUo0Xs4n2v{#|lHq z1)w)DK_Tj3AvCE=#68puP+huH_d;ju?8!$LeOAhsK0pb{xp*)~CD=B~c=88pbu_x< z#MPw1O{$VY)tQU>@XOgOsF0+H($8-XiYK5kN*C~h;ug0#Jv$DmsNzmtTlmRjo0=e# zg;<{iaRC34rh(C5`9RM?h}~KE0*MuF{KE zoxcBq{N81Pa|F_QJIYTLs2XXs!h8Q2id(UZX^5ohVp2t$73lE+Q~Nh_Ig~_k0xpEtb>CDjD ztARl?acDWOC4b(HOwU&v=|Jj_pAmyN{N1j3Gy{PqKwv zDLXSAPiUHt{0@Sx@O&KEjCixO8gp5U1bWWu<%`%axlHo@rRz*Knk9_hKCpAHw<08` zKiW9L2%4)!uNr~u{edEk^o5CvmsDS_Pg-PIvjQpP5qKU3Ae(6jh^le*mvS&;8ZkP4D35)LkG<=_GThmGv=Ss!?Af}(h2Vq zFSUpIX@sV3U{KjG zA=_>Zk?_s}(G{(V4uQ^s!(f6#8e*xy|A3HsbYcfx(xQ0KU!58ze%t&dmmc= z6;$Jnkt18d=M`LPK}q^|m;E)sMSuij3bfo*%w;*&JHT=0IcNxBJOU!o?IFbKb#?pr z!1kH@l`k%=<`9MuhdY`dxH57d;-_Y;l4Lt*2K3PhWvY{Vxx2+m*pIQp-uO+|HeXld@@pH`*Ms>=Z# zIYb(@kkhVZ?&JF)Mb>vR=`xWF9FWApa$tl(#t*Xyxzpc`|)F|CHLpQF1Yn6{z2e*@ol z&OQbJiHiTmhon8wiWPd`wAy^h_nku({K%mT#s~M>43|4Z#YhKhidEf@z9v-Y5)N-h zlge?n+&yiC$y{SWU^}CG1$K~2AkhXH<6!G)5q?MG=_|!EFVn{0U5DJ7$xw} z>6i<}rPQ&d6%80jv57w(dTL?-1a@6Q+7WD>CWbtEaJeau(BlKR|HIu%P$G3?bkBF*eZiY;i-P{m z9>V!9cKzRVDP0OQszKyt6PP-92diZ2!VcHJ4C7Tog-Hq@oAU2>oll3BY-AhjK15*! 
z=JKvh>p0Q$z~CAC(4mt}1sos|?4R27w!Y9K%9DmrB>=`0MjJZ(Ux&8m~OnGH~l7XOqwp!o*LX`KozX6zJWlCD*3 zC8cDJ{lY_+M=TC1=xN9l0ae?}!-+RnC#ncd&~wxs##qu}BB&qWi)}bJ{Xq0N_1iv% zbRXL7E~4X=3O2Ia9#Rh-7DwU03Cg>)(XV(sT-OzqpVRm7ArQqa z4w6-k^h$MGsa*2vRY&179WE-+94?xgR=+Ix9(E#QOjn^I04)tU2X%oRsN$CZ$czhj zH+EH;o3vl7s=Bt^A&6V%FcUpaqjp9VItIG@JG2)FU^?r;Ot%Tj=P1hna+)lKyJ<&+ za{t{pn+O5;)$~vFDj2%9sxj)JPzHh|PO#xm0Z>eFtByUNvD`}IqhEh|2M6H?mEq{% z)gnZ5EwzbjMx=p?9dk(Q`5((8=bui8PC=ES49R?L@Sk^P;kT6mJ&Ma4y9MWapVm1~ z3ZD2eiJpFC!mK8()azXfQq;R>dwU$?b~}+R5nulnHHsT?yGibq!PKya^7mrF==NAz z9)}UIB|8my%+L0?k6)w7THgM|t|K5twa1B=L8k=*W)wkImPWb|)pedm9btTYwlNok z`UL4Rb4HcVA`ShtpNlmGQOke-wJx5?Ee4Er*7pMTi|+5hefxjA;K$T+54}IUJ93G_ zBsZJM{Jl3C{C=BX@n{%sPUD<%!#lHv4%J&_%-+#r;z-zhTj9=1dCN~VrFH^`u#G=r zJ#|6a=_ES39dU;a83H0h2IHh%kBhWpi*K8|ulP|j4gI9%mkFMkgns&I52}n)GX;`8 zP7O)+`yK(SMTo4_P_{9M5h9fw=Rhb!kaU$DkbnN%pfCxxCHx&Qmrp{BdV4 zamyb@6K5!*qjo*F18WQj60(-VHRRlya~YN=UNm#<;eP`-`t+^(am_}b?PSZ>Z~dui zFab*JnC{6i(=`$jFD03V*wl(&AXG?38bBj3v#iU=LM_}O_o~8^NMbBe3s)KDbgRh6 zA3#p9#(oiFOoAlv!X{!I^UQ0c!Jt#wL5^GKN?CSc8&?nI%A0^xZiqo2+aBK9CH1jX~2m3Dn9^5u>8E%DwJA7C|< z$ngNs#RPOc>~Vo@r%wwr#0E9H&8wY`glc9p{xrWZEn*yP#eV#wzBdOY#oZ7aO`?qzZBz;LiqnjLm0Tr#Wn zB0b>h+}Ev^FOXe438@Q+W6J-_J4MdjA=|(arz-TZ=4{T8Z>62s8g=nF9KJt^Op;^u z1A{12Fu;Zb79nExil-nR3jI%QbNldZ@4mx!3PS;S7Te9m$YgMEh2~F6N}d zw9eIgixZVbPD0h_FjM(Qqe`5u3JL!jGD(Eh2mPy*ra2h?^kO#BJ2Adbz}#KR^`^&g zhsIu&+Jf&jxRT~`(qg_)s574Ot`*mEI>we2m=;|z8hwQeSRQSu*&G%-cCmZ}wl>^X z*Xb0|$$*G-=8NYQd%QVVon(?e@h-O=eoz(@#~H@1E(tHe+$}@9%BBhP_nKrjqgZlk zv6f;L=CS|-U-i)E&-Fm8<8j<&3&Z#*`Ifo!xqbjND9@!sM(k^EGYYnJme*r1=m&sr zKkmWT?*St1u3c{v*%`UFPqyDPk!=K+6FBEFzz=bb?J}TGD)yyuVcNxj(VT#^m8qax zi?W4SAb53!r3`Zqo~!I9ULCGETFGikRZCH-G|1|1*Dd_)pEFnO67(xoI{(||e##T7 zuM+DUlYdHQkazhqK1CSyOBZjKuA#IHvn$j>5F?SkKO+3L)MA}(Bn9Z{z#a=x)P3}` z(LEq9bVdxpK3-sQZ-jP7_fR(l3_egu^*!M2NsJO5uR-|~LdQ^1K+xw3-BMCR z@ys;ze(idX$dbs^5-a})VhZclP$Lz|p2Dal*uRnDJMmkILzP_#`c+6D4M;6>+7*{N 
zsbLL1Tn>mh8DE!M^r`x%i-MrtVse8yXmNU1dUv{Ry`Pxwr`S7WUG4gwND21#`h^pE*G%-^aHxQei52Hr(K9w?w4W?}*`V}SW1N2n&|Ah;U%jyYq7 zK8rli)opRGPlc==q;0XptG^@ZadB1DeJ!3gjo?5f!sE2&*FP34szQSd~%m{X`k#YtStt8y+BON@uf_7H1k}qunj*G;Ax~@-BZ$`DYXts1jh5 z_)2yDh*xgz^atn#(_bP*NBxiPlhR+3kzS*8L1FSB*t{4fSb&S^+#jKm7Gh!Rdb;Zh3$@3EH~o3{hcSiLN!ek zg##kkVYbF|bTFNGC}Lc44BRQ|i7FEI=I1ZxD~^YNgva~(*GT*WsUhp%!M2**Ff-ww zldJnVXd=-mU`2bto%euVhj(9Swfluy4R@05ynloQFpHmPHL)UxsQLE1um8rD!bcrWVL72Ttsz!h*IGTxhdcm-(q3KV4a z5!iroILp=!TWZL_#(G-{^q^_(oLSm8CDb$19);Di#1OrA=&Ct~^p+yn?Q#Ng_IAAv zdqkK8wP&-~rJ-ITXF8-L$L;N>@4PP>*FI`MMpZvy=helOe@hqWu~jnj=|Et5&iBXK zKaR7^9Y&$cD+1i%mx-O`Y!apCO_H(b)nul z@+9X7&jw$wEb^cajUz^ogzpjQ$p6MQ1YfTa>BaHA#ELroH=~6~1_65fY^;{MfBq#m zDeDe~br(-VC6UBUuhcIIc^rZ+Z=d!OtE8bW~Xj`DYZnfQuszgxB z9shglYa>2jpK`buQo29Ky=|Nj<_`G`S!Qa!;N%!Lh#7A~UX|R6_H*`E^*tSBKHoJO zQ&MUDV6$LBAg2OoujWF*foA8xVh1U{ghcj4z>|iz07I6(iLalR9CJV|lp?!&ur-d; zt-plw5qkN@GRMo^Q3FzZPl@>Gm~aEB+U5%?H`GyGDc~}(`u0au-y;y@0PE-JsUHh zoe+3qO4pv1OMh%dT4_UT9;;#x3G)|*wv5vn(BTdiNR*sn+dc2P zXAi~Yy!}TZq8qi4PbS&m)sJG{Js8mW=ZDKcEx!I4d0CR>&Tx4??4EqzN?PyLFELgW zb31K-cR36{sEWh40u0zk-mxpcb%cQfN|M0^;#wIZQpuX@U*rl^fNZ;!jy&lDP%4#6 zBII@I{h&IQUvA=wbclFA_eu8W2SQU4*oKlR9&{0}4%<5vMT_6!V5zCW!!mOY_zYzT z*}#>ln@ddwl+3qf3*6l8Mvn$)IVG=PM+9Un);!MeTN--jNK&_>me%n80!BjjM4M;K z!*kHZYIk_c&_X3hfyAgTX%EJI`USc`BrNE3lSzJfrZCFS_d77RmS{4MZv7~#Xjv`8 zBb_5nfO+5QkY2T^ejf)2bXu|AI7SM&Jdj9+;+nQIlp@%OWf za>m7z-@r0J=G#yAtbS~{{S(xBI6W?{O1w+%;S@H%~Lch_bPbW zFvf_9+I~9=dtE7Iv%RCuFc5U>__8i2ZE_1OB<~gOvY=F=)~8C*c~==B0U@+wf-TxmksvlqON zJ1;r7bJ-KGu7<;}V8#y6ry>gFxx7&?6=6+ZJw9jOv{HqU|B(x#hqI8y`Yw?&r|D`N zJ-26N&OF(L?;_&0>o4%%bqmq1zZSX4E}k&-tUm={%8Eolx}bzSj%KC+_yyzzq?f5!v<4U9sH`g0(K@ zYgcs=c?SgdC9NJ47aW;+twQ<`{d2z9SSmIRS^DDN7RKGuJa_+(SoWoqjn%}d@WOql zM0Q~|VUF5ua-4Fqw5w-xm?h-i9p(Fsg)hy*LQf#4XL{cOSnk*F`vc|0qnQk@e(Z{=k9|ZXw`LS-rFKq-WFEZ{o@YkYh_Y>1Mn$)01oZgqTMRUelapk zJ%Xv>L{`sgg3&L%@an*h1BiE%+*j+wHMh0A_q>q5feZ|#a?Yi3uI(Th!VoKGS*k_Nox)Z{-#X{ocZ^~rcUNjz- 
z^Dws_ELb+JY)((w{5DNsq=SODsgA>UxWGcd35@N|BTb(7MV)q)&-~~qDfksmCZA-h z<(A_K6RV$_sf;kt7p`oPq4t8G~XdVZwMV^Pn_)*svgX}o;h(8Pa@&- zmB#Ec%-9^=6;|40-BKj09zT`m2i3oV2I6Ay*jantqtJ8dJYwUwciW{+5v5VT!Q68v ztDY&eI5bROK`l@S(Ibi5VI80C<+hW{juzE$lLZRK+ZPc7+V!AnxR*Fc>9zh|x6hnC zesS3Xw=8X6ThV5%meo9qlbzF4HS4(#R=rjfh7<0Sdz4*n|Lkb3RrxR2G@LIj4{$Bg zDr>0P+27s8p~utPaote1JUDM``j~~%38jvoJ=x{Q4}az#Gk6|-@>4E<|I3^bCUUE) zSGMvDrC@#}f`{W53`(`^<-8N8#h1L06F9l?=6mbe6K7b`*{@zihjix( zF43|Xe+vq7Ir4U0%Xw|)AsF-7GVp?{$1BT3qd!gvzU9ITX`g?{hPljk`Y+~#YjzWF zCYKt~GH13wc&zX}1zth59z0~e?Rb0W@nere+BGIsDb#-x=yp^>9GUzbNQYkQ_}kqG zX2*Tkn%+(;gN~ zrPHn7-Lq|LJ;xvV)l~cR~^xV?Oy(aLfIpvSqI1a-PHavwdHNehuw?Q%)XYQ~* zuOAeGUkfDOoi^5g%p1xLT3n3ij7A^(waX_NHP9jj=;4n5;Z$^tYdqk&nkt=05FZ`+ zMM*k@frOaCCiZCPnW8RR`ypDD8s_1ZMwj3Og80rg0oGlnkYSDha90YoUb|s6KZ;}W zAz@>miWhJlLRYYiFGDQrl z2~EIn(e!qoNUOq16%5Ra?@3|r6#Dr)%Q6<}g$7s&EDgBG8A{hKxvgdXQsdjY+~HhR zPum_hJDG%r_0ahwAVXW_ExwBzPG~V)BE*MH|d-g_j&;1P$d#||!* zk}U*6<$-$mx?pKgxI2F0>kA_{K`pBIds1aVULPCc+~_h*C5ZE+@Q7(m>WEK!Y$8(y zO4m`p$nb3i^0d%=>bDjq+~AaxSK1o%b;3i`ZV!FY_SnG5EJa0mS_}(S!|^jeNki@Y z>Balmxf-{1Z`)RQuY5G^J55p4^Wu@)5&@^3P(FKEi!eU#mtc3M5;X1fA19(S7G!AZ zsPgS9qL1#UWH|i_#apMMKS}dH_4BD(?TMEhn*efSJh#kbQ)=RjoJ86-{?OeP5719_ zNZY+9dXL}Waca!gq*0+MZD%ux$hxxRVhN-q3u$YpE;g_d;c?Jggf&iRdPSqgKP05V z-K6I z>;l@v!@Yg4_}<|8u`gQ9B{!sU_)$e%B-Nu7_#u?8FB>R}boA4AA$z>Fv5vLO-Ln>0 zM^EgRPC6HTyf~zbBoircMfb;jNf;4&`Vb>BU40I}gaJydF9{$%^nh%^9tx`-vA!bS zhS)D(2~8=0Gi$8l6Y4i*x~ab>W%BaM#zG{hVHAosF{0w#jmRb79D$G&WN1}I)Nj4E zmeVI*=Xtafnm}0DJg*Pk;Fmwt8jIu`myRA-#LknkdTuuhXaC5r>q$g5sTIR`ds_Ne zlQdpy1hGA2&##!+SSonC{;VM_<5euUJGWct@arGr#tG5$pT?>Ot~bZqss8Iu($m0V z9wrd@x3$st#KE}zpcKXv2(rlX8ORCq@sY-`kIbYC6-@+K-l9v^g^>7)~VtLjM#<4!xB;uq_y(==&I4}TP1O@(fYK{KkA=ZJ{ zuwN$Wj&%75xEu;V%B%@7(ck;bFODwpQMGWq{hhdgXo$Lq$l0a`neTkz70Nj%r;w>K zsQ{X`Z-?`iRnUG7@Dpjqz5z2v?$PW%&G?e88kA%14uH^kI*v+MbK zY(ukierw$Q%&FZd=S0G=q4R}Eh}nY^B8&ikqe1FiYjcD!K%P7FIOfC^p2bQ+(}S;7 z2ZNo6F%?aM5aE)mRX?HIHB`eqayE3KH?Qn#V)W-mPdc 
z!XuUv2pfVT#K%@834}B)XZDd2KLhGlM3$zA+MB{Q-am5m&PMS~=o`4o*)6yScjk$N zw+ZS>n)1^!`&0Uj%Fe;b7!KDzWo{I|$%g-2Fz-h*PG)SNVIH)h73 zo;BvzY@YvSqO^CYdxo-Vgb%Ht77N?{39L(TKdQ))`FE=qtem^IJoh`sn1Ki5shQ(S z=>$qE&AP-e?US9>pJ%nt{y0Qy;oxtAOodECpLZ`sEc(m+c)%vafKx%2F>;ue!j5bZ z#QxaLkfC3oQO~j2#gb(|7*^~_EOTuMl!O~z012fCgF(dvCk}yqA!jHR7V~zVhxDd* z`dNRJ)U)yEobZsyWAFsDRRwU1Daf;qgoA4pWenQq6md1Vq^8fG+`LIwSow9$JcY!_ zI)2cG_oJ=2B~-2p3S{`g(urQRzHF6lhcXt;dncAebIR`q8& zY$ih`im4XtO+h~`0E@cQVLewlf#BrKv-phAWUP;s<}hcE;yj?LH{^QFvGNb6>}8#VBC&>GG#CH`hhe^B!*u=w(>DY(c8xUha^HGi_kFagR z_^RNwG%JYlj?feZBdH{G*Q`FaL?0XV5R4nPQ!HW<<>osqj4A_X#Sof6#s@{XLK|>L zSYxw**_@7Q9(BwwNY8L-KLhs20%G?%8_QMvPxtz}S&`CFYW|qH7xV(Vs z>xTUT$a%`l_9W;Bg<`*mFb=}x0I~5tIa-O!g-&PokBF$0;4h@AH?06gO9jM+*l$r= z7hXywQAPYDD`Hed$BoF0NOuD)FtB0u*S*#c;PK92zoZ+0s5lAkFL_V`7rpWIaD4zm ztECA^IbBy35ak|juRTOO;ro7WU|2sUywGpXJaETc=j}$X0AW};ZhG}><(zH-cTLxG zHH~Kh-3u~2WovUM`JQWopJK?jXjEO!P3l%1^}t+q$9^%S_dzn)!ATEuh)crX?>`jVlZ3#G=q=pHe;yWOh;vsieGzP}IUM&+Ul zl)&{$UCbCP@fkNw(!gd_84&uLk062*#X3^{&Ix3*`Y8j(9=9%ROE z(zheJs7{{Q6emmj8N<}7G3Q1oVa0fru>!VsTgtiQs+x((eHb(5zm zhhxK) zd4XYgDMakTH$qbt8E}fxTY?67m_Eqd9Z+2>7P1Cb)L|S^YrhY-M68UPMW0cR<0nN% zYy9I-nrdZdY$nnrFWYQAs=qEmvX%C|=l`noo2@DyIN>?`vtPA@TVjVgt>Ceh73;%F zMyUPbt~m)gWXRcB$d}iqdiI)SSK*KieJ1USU!!x(|hIbK7KO#akTo{zMs&v4|LqJ>`6ZMka!KY6AximOim=kn!{QGHK01&ag|0r$tKRD9>j#H#7Nhn z#TPv~_3xPEFaq>nRdBszDsCo` zP+^7H4I=C~5;+YjS03qL9ob0RStPZXfwZ0KciJwSyA}@%!T1%aEJIt=KdJSACqSL@ zyv0Xm;;Pz}CE554C2vWknvL=ddgCOIZoXVgf%U&|UHW-DnpWT*@6(!q0gbr~cO)@tF@tLcmM~tG#@7cLU}GYBkkyD5_B5rR%lQE`YV(wJRgj?fV(<7n za_?n9`S}0t0;sRwsfFHV+s31|hDY&i;ycxMl8z`1tLP=&Y}c)a`xmPpWg=kv;5UcX zQQ0H%cJlUWY8Et{qNpC&yYGm@ zt$YiQrNd^`KJWA*iFt#`_J7vjg{_SjNmR$&PnuR^Xh0dBM#`|2C@K*eo+e?`0Ib7r z8@>msnd&YQteZ&ZJSVM zgKXYVwjh)F&O%Yyvas(wY6xy}Ab+gpIw z660rNu+rg}SxNQ~-`1=n;Pk{d zCPObTrRP>Yg2)U|jj#WqM{pQy?Btfp$pXC~jZMsEX)wmgEYvYEJ0uTIKpe7dpIexQ z+G{l}0>w)ZHNr8RG6XvXT^^V*9{OYn^yIiNdr&^MkeAh^^%r-v@Mi4c5T`HZ0=p-^^gaZOtk;O}{kuvGAYXkzo@>-&gP!@w6K 
z>WXpHxqryzC*V~Les^VSRk2XzOW^7^<(&*BY)6$e^n9zKGROqd z{XnDZ>SxKcHNC2_E0=dB?OIdHiIMbK3=}@aS9948o1~a@qcG5sVRya`vMn0CI3$HR z!BNQl+OZ6hoQnc0iQG#;cVvO)>nOEUEH*G1$aUsa?$QVumGcNwe_^v^0o9l7#OuSKd;IW= z9hT2Qw4F$$nqXEX7DE&vruZ(uQ&3>zZ&+QYOKPTRDhr~b~m zHw}`ch79qgkzO#i^Wksjzn7VcLMd|=w$I(m)ci4l)NTl-#5c#cs!`(aYDBXr_x0v8 zU39T8ZF@5-B!-i>Q2UB_!=w%7-B*VQ>4XFSWj{O;eqJSc9oe}IY}9tWlZ+d1 zew6QEj1JK(MiVkXEJ_VNHH2pY?J(@eZ>!_`cjja?VZXvD&=D_xk*4KhL7xOd{{ikj zE+5?DGkMoG2uF|t`0N$V#i7cFk@iCW#5g# z1zFYubs7>zudDpUhqK|_H2}DMggD*ktuO))fWIrv-RoiP4H+rZ) z>VH4U*~wq16k`m&R<7T>E};QPr8ECU;uPyt_bD8)8=Nw25leanKYn=THK`m*EJ6=~ zBAoCDxfw06lKX-LiT9t?1{a<#Z(rJWm7u1}pMv>8VS_FAQ5wj8-IM zspXZe4guRHexBANZ$pn@dAa$<}wKFO0o|L`o}_i3I3{766A@M6tTgo0@GHp z5)Dwq`1HLZ^sFKIe4WojOK%PFIhX0Uc`>=WwhX1VTbisy2qNYA z0@-m_Nc9*`dtU;L0_bOK5!4$P7%M_kORO}9zdEuO9M0vb*MrD-XrP-xvZ2zuHiZsT z%)M^t3r(Uoc$1*m9R7UR)Z*uCY_73JE146tDtntPBbo1BuZjtP;~H~^Zt1KWUY05V zt%rrMz39!apzJrQxWbLqard^Ddq^9NyxH#RaA}C>56SSV#ZP)(C*dZ~E!T8o%!LEKym{s_$ zo_O9P9pOBiz8&*4`uAh`8B;LwPFZAw9J9$iUoCG5WWRmu%A}Zj5Y(FT!syh*LtXm(T6p&bS@%6uHB%x-l;VooaMjnJPboJ3J)aZhdd8>8SF4OTTJzR3 zER{!1p&)EDe?{DrOU12euFdBFQ2@pxf%ekC^d2m}DzyPSRx-b}DcEI&-4|XwAy|kC zO@RNPGINMRAiSEAWY1{;!b0}lJ4ElC5GR#I$Bkrl9$+vTs9I#;hviHx#?uh47_ajE z;uFN%5>uLXo%Vp=WfkN1+Qh2SXYo&c^bOBrLv!|K4pC(YL87TcY?K5_5rBaiuLj)`SMe78}P;)4tw=B-}22Z?x< zv6n|zEW`|7i*l%G5f`vn=u_pOvLN5o+)is1B&B^Q5zUpAt=XwUAjE{9r%6iD>io8c znnPGu_a%g8Jv_aa8w$xakPfWu+%(9XYn<7Yelct!trWT!TILKI^69L2Fxmr zzKiMsIwERmc@;Rm3$M~ov6a?UX;ZtBcMbVac^)XF`s_>~*tS3&T=0GcG_@%k{Lt*Q zMPre@rkV&AUEyLDEiiux2OTn-h$!E5ClI zo>rz8l3{`u{s9}t7`;h51f3$;^~`aE-6F^tA58IKs(&rShp$U$17i^!5bOQpF*UVT zVA>6`DsUKPYvdfd->;oL2L9_3_0^;jFAHQ_=h0hn zWBl$dPe~f3rg6j~0@N%a+7I4Fr<8;GC>p_j?XdZ5~oXfXcc5uom4uNP6l(P4z?2wr|a#wF9E zkKkEZF>KT`ly1ak9%`mt{BZxX%9+D3h~cUClxE`!a-G)Tm1#9JIsL>ETw?a+HD;KZrk*hA7o0tEx*&6UC zgtSPwVqh0?weLF2OdzJX`d|D0_EJdpLZvf3_-;*8MG9h4SE}u$X983&)&JTRuULW! 
z(_D=3FD-*K)uWh;!c=B}hQW0Ozu2l~%G9S=JdYg_GDfP^Y+gK}3&+7Yf+D zQI8)17SF2qtTBXOEw56+m3zhZ6O7?qAgUrQauj=vCsL(?4{^a1slu8CCcKc8Oz|pt zG5}4?3@Q&D@9mSO6PA{eqQ?Jvp686D9Wq0GFK16zo;9>I?Ev(rcC`9KD2l2|;&?@r zQEVVp3oF{;d5#ljWZ!&n<)e(jmEL%?9KMaUz3fZy{k{vkSop{8!wtUW#}?2@enjqc zpEB_D#2J~@&>6At3;=Ehw}5#8-U05(5mTJlu84r04kTXbHt85vrl?23`h0%ewajTV zAAhnx^wQd#xg$cAq#{7nVAM?Rd4Zr?e(DUO2=W+x^~O5X<~R?Rpwly56g#f*S_Z}1 zNb_FihI%6>MV-Wk7`}r*{_&c7XWxZ5VviLRe`=UK1nVZ;6MFgL%HL2VN4!#A+*d1= zxol=a+-Qo#j?IE(3hWF2jSQY}LKl1lMg5nUbt=&{lgNMha#iDJr2iI zn%I5veCOdPLcGDJ5$_%H?*QL80l}(+{f;j6FH+_ONxxy@LRA~79L{SclQkmlnkK|N zz<#M<-GVN>kAhQM-j^HB%3SOA!=+)eS3Kux^ap)dz>UPp?I&C0;5)J@HR}9p zsaqfrqcH?)bPzOV!X+kE99*9z^qY0nSkw5l5oPGGwtM=yDzFd;5-Ujg)!Y!GXeh{H zdjZ1Cs9-!LJkCM4fg>-q1jHj{zg^*-jL2qs!Id|Qw1shdJ2W3cgFKY89|O9gjNU?4 z&T#9~1tRDC&w*Vy`g;psM+3j=D&hD&u`&lie8rk}b!((lvm?7brkU$HNoa3q#cj$)vFEg=l0 zl{e{%{BU{cw+6Id+Put6iM7o8C4Po6Rv?=Xs?$d4+$<+Nlp~Fc=#kJSGaVQ+*N){2 zB4IF427kE;sEojFHWK4ZO+*5um4~5R7m`@492Gh$b??9jgBN*ccz*VJGk>^`PlsxV zZt8<61gvLUSVo67^o!_SScV>U@D+CBDM?7j8sNvf79Uw(B&L4rTc{m zNFM_o>{ukscL9UF5hCf4${U)1w34r6eq}2)GMZoI4{w8|Ik~0swcqGdF@i&G1@GeN z?CLyf1hFUa4L?vUTY}VB#LR?SOTT6$(H@U7`SqXJU5}E2jR^-(@P6)KthLBQ zHmL#nwfDv#PVP?A0P03&Ln*C*Q;vG0s0|<3j_m5;16zhTGEBe16W>o*?jBk)TxguN z6F~QEf1~Pz;bZt2T}I>9nin^W!9D;7bE18 z5I$?;UDR0>zd{440DqAQ5%>$49;_`>k-Ucnh^`K)jpT*3HARd#X9l5SuR8^czp)pt zSgLLic&?z{Nt7OMyns&sJGS$EZ?e-(5GZ5aFIy4(0b#mUp0Uxgam^?Mn!VP|zZwk` zp(p^sbs4HSW#bz+KE3{%$bmwfE=J{~r~w#v;e^j&-=+-I^!`Q6 zu^5F{e3*7uHQ}E9;IW>4_dg3TJLvzPCoVd?P!5+GV4}HLH$Rs zp{*9T`cumqrhPLb)^P&(2Znuz%5$2S5E$Zw#*W319)v^rvw+k7$4qmq7%^7WHR9hL zBH2J69ij%?sKZ^vcD0d8v132y4nh&lgkJdD|5w+N2o-D!z=g(^A=Kzgb6jwE`)Jq@ zfURk_M07_KfooSJzTp~;C_dpFQ;_N~bhaVqHdbAYGB+N6@&RH%5RZ*dfP7C<%Y8a5 zRcQEzT_4I?R(!}uw?7K$>nnlkDDIF9eK)_j9H_$WDQk4~p7`{7b{5#=P8_sCOpD*# zbb(d&eBVT(KB4L5-`s@E4vB0bjHhG~$H=ys!LkG@{V)*wk=HxfY4Ydx%HD#Az+M4~ zxqfbeD?lo`ykiK3C@Gp=fGM4i1ijtHJOJ8&h1JAu?OBgQnE2oQQFLy@?*!d0KxhfO z9Soa4GlRcJ%rrXy)c}kL^R2W$I5@N@yT>x7!|1@3yn*)I2KX!gofG_)6_0d@nIHQ< 
z=Ttnu7^+L>n8sec8GD8JP!cPr*{Bpgg>ESUFHXGA09-)X+6R7HRuB8$siZ4l0GR#v zdzirvhi|ifhg6TVFF`2lVJDgyT5|X;I5Cp(2@k)fdJi;7rm)jOCp-qpm$akP;Xy_i z{B_LZ(&P`bo*7fpZKwy_ixda0WDzQ86`;BO zkNlM28WTuX5Rk(xnO;081`9b=T`#rn48JC_f)+|Ln!5GlBu9^7noPm<48F?G>N- zW-1;kufQ3KwDdpgnnGCvT<`W%-B*)yT)qfI`mz8vbU#vqM(j=%Dkin1*~u<5=1j}% zmj2?aNp=J;^^rq8)NlOFQ1HCk$ZkVpRG3By^duy(fdY@;b%JaxPf;myk5Ua{af z%GTF?8ZJ=eAZCW6#{ddu7kZq!W;p6V4E>Z^c2@{;g$<2H(V(am?cT~F$C?c z8&mS&#|_+*8`1LNranFwpB9|aojO!LQY;6e4=7iIQNE*a&?Q8X`n<~Vx4%(plUM3* zPS0M2(G8UV-P9Mi=d)wuVf=DIhJHtY{?MD(#=W~6ID{!zZxoA2qAf>G-#3qbHHyGm za{kTu=`$QAS}+H=c#l6i{j7x3M%o@_MQlMkv;MplGA8^Izc`Q$-J@7rLEz{kIj4JG zJVe-%hZ5Kwo_?-Tr34vxGbD@$lL1Zo5Y7UduW+P70(@{cmrx~1=Q3$De&h=i*IM;e z#^;F5`TQ*2(zfjF#4pkdpN^dc$k8MZ&#Ak4HUGNt>_$C@AJdz%s;?8kIPLcZ&=B8yFB^B6vJ;;h>3WD89Z?8U84gD>js~o_#<+-P+9|L`C z`>jH*hfLv-Tpp5mU!636AKl#Mx=;xC_+wS5hDzjssM%`{AM;|w zO8Eu$DeRLv+_TDqSP~r!rW_qa+l&lXI_ijm{*ppu7Y6ABsIEn3J!c2O9+;}^7_Jzn zX^#I9C>!ocq__w2fF&L)t!;b4>JMAoTX_LA>G?K`P^0i)N=pW-t){L8xlZ-5P~^0n zno`^R`c(~Ap+L5ndLv7?CMsi(#se>~CsH(01cKqf@o^6rdYGBbfpMe>J2!C# zhYREWj=35%gj~UUgsemp2(plDQtH-{f=Xbd+bIQ$)!-jnj7UNaQ`7HcPHrd^#kC9z z;t!q0L4oxb#0!k3KHxdY9M4lEZ86rXRP_~`@)Y4s1g1w06mnbWRc~wASf+UmNvy90 z)OXf9NZzb=FMGd@{KY59FP37hkf_V5O00N&)d>OO7XU*+FCS?frceb5ti^mDY_>VA)(^Jfpp(Eu$*`#$m2E6snCl-+x|!Y{)eOzY}GS1~^u ze;ID`C?zuAs-v85Ej<_$7YBz9x(kpxQ zMf=sunYog7eo^L&{r|uohl}p$8zjN9m5QA-h?!|JHMWyEIm4lQF3mPQ15#)@puVKD zrZ0AF_~Y=+x}$mPU-BFzGam>zoi5nEu)loSv&%ULXC3A5A$KqF=f9R?+IyKy8^tDN>2?SdGA zUF0bg4M@%_oCTGxEas#f$_&%bYLgLYpCXJE1sux&9wc+&((%O zKE9Ee(`*5_*h3(0y@@3@@UJOTk+xHd0=S<6G1mF{bH@-G6!sS~i~*wRA^(A<3p zlUiyAOI$|si`JBP$Zn){zM$p5-{01=^g#v52cMHwoh_w(@6-o+^>(~_fVE!^oVYG9 zerD*JCkvJ3?csW@EN^74#Sd-U+4A`fK9+N)Pg!yXwx(ppY|N-WVAsz_V8?7J(C(!4Bx?W;B(T}qUDWL!#BMQP@+3*wDiL@ zeP1md$&aufF|3g$7;`mBA{g_wC2fibW(lT&$zBdKgMj0`2{JeYN}y*W7J)yHU(lYC zEr5I=t}&Zr7j6~MalW&CXip7KzdW`ElP-Wy`KMUjt>by=5eUzLF!Anbn%%j-a-V4c zHD0E@3%z8JCX0qEJ!;)5Yd@e+cJ9oDRfo>aH|DL6Y#Ikj2lKc;?6ARz(11SOpQ&Fh 
z4QJ(C%?+DD&{6>MYBZ|R#_eJMYoo;+-?rui?#0v9S1UFVD3}{}3G0&7>l8*%Vgpyy zefsnx7!Jqg6EK=Op7a0-J>&Ud2D^Fsf^5Y6&qnkcS|U{q>zilOH@i&P7^gIb z?fgE`6gBT*)i9^dD_X7qGniRzs&c8gr2AC%nb{#Ozm>nm@dnmW1kj0t6sZVlVBlms z_fp3Q{*E}XQ`uepX&zzl7(@2a-3SrmPw!#x8<*+i$4>1htS2GW9nY_5B7@l2HUD;h zsePm5@^pIYN9Vw|FiG$?*zg%4zp9T7VE`~vZgNG`(Wdi2MOHIFz>!nFRz0~9aqwS= zH!Dcq&_n*mLQMPW035C zdl9Te);}|Ud z+L^<d`c#Ss(_~ z{M%3Y5Q#B@0q9mtK{P=dK)w?JlJtO<_^xEUE#BoI=-G93_9dHow zr|ONy;f}QBNQuHjff8EZyNF?)fvu!ufY({8@wmJsV1kFroSNWr-`DouYN9vPzHY=+ z)7Npgn%-Rv$1kX>IQynDNz_R|MzqW4wrmIsV?n+nV6|Zh>>y^Tj&wdk9wjd*j8Cz0 zq=}3D-7yl<+!E5`w-XjJrR24kAa2@3boD6oY>=Z*JruZaIO6PyH%X6l-o0GB+a@$H z$3N%rK-1*dzK=r3i?)cDT=u?QbMT+DXUW`R8HdG3` zF;N;cb3ew5;Zn!C%>oUv`>B*oJ>a_}F`jfDa`_#$FdE84zZ5;EZ!-2$d}HFajhGk1KQfO->Rcl7rHKM2O2(v7UT$*2)8{{n=xQIQNm6pPe;ob{^&OYEW)dC;`B|5|{)%S66M2QO z4hY=(NLr*_v%fFkIk2{e)~_sZ&zRN`mBJytL+C$@DWp%|XOW~7ml^QO4*@cf zm{aMyyV6A%!Q)invi%N%c-r^q^0za&%jxA~Fnw9+Y^2dQ=3`DL--i0#eaQtAeXFkx zeeljyh>6aK)9a-EGMjxD6b{MV^o=f4OcvDI%m$e47OeyXWtk|(a zep?~GV=HBUB=8bQpp2e1%6F3aX?hvGKOQ7d=EPtVtXZAI#Wz* zXWCkrGwC?n&%zL=Y^1tjr=&LLQ2E1x0taw=FxX)3KB1hpnYNCOoB+S{7bAs-1Sl$* zd^M2d1K&8KhVj(KJ!Vm+txyJiiaSW!41a(fTjRHNUN}4BYHylOr>yG7yz;jY^(* z5Wq8l2;Og{|l*A=4CdKwinn`cg-Wq!#-Z`7=6Wgf7a$mBi zqW-DAxc@z+*(e{T7|5q@+l4$&k#mDH*f$ zlF*-^bTO_%&W{GaErykhBRJAXp$j_FG!OBicsVc}nvH3d4X@m5j{DK9+=JOW>9DhD z8jBRA@W5W}PbJWv|k1iBi zZWKYIdBr_u9|^jBvPD1o$n%W0HURpiG9#!}vg0eZF5gb?x2o&!^Xa)hcALS+@xfbB zcv8Ohj02J&RcrksdMuxGSKII2O2ZaRt%TmZN$l9$GWHW&rxy;2tmF)ZC+&`MdJh-Y zDvMlL7-mV3!cCDS>40i9eD5MDVwO?}RKH6+Vl5>oi$_Y4f$tf>Ze`N`^szBtI5>m% z2`=Er$B1Ixq6o&FDO_9L3(D}I&S?)fTn)MMEJ$7<77J<3&x5$Nj))~VNBrj1h@l#9 z$EzXgHTDcGt7Oy}kLz&XKn?u0{o~WLnd!aY*jrhCvuElm$_5f_%g&1h;)8mw+iL3L zkKY%-$IRRhv@M8c?s}+dQGWs+TX{>kuF=?x9TOpSU7uH2WV|R+$3qHZ(2d#4!J-mP z=tsM>rkywV?Hu-D11h0NQm8)1`vOc=dxDQBRKK-Zt|CWtCt1kI}e`$|HRLPCJ zpBluwcj(-1x0(AD{X8EvboGy3G0G0Uf)a0s^1?ORmRqwL?ByTkKRd8V$elDMl~-Rn zBMf`bcfaCYE4pqbOABABf*n(u=b6$&?_m6*9^QO}-r-a@j@{A2U7TX>9=G^@Jp1(g 
z%^^MX6ip#b!AEX9rwqyh{TPEOj3w}WW6rGg1r3@xYE2-7u8S86%(rPGzAG@;G|_Pq z3}OQoTM$d&K`~4cFPJa|C}YoJBlh+&gUvgdqMgc^C)BVw0RIuyTNrOrKFs zwHY8Pon?3*ZeMvGv4IRT&t8{ip~L7 zqT6%h*YyRFJ!N1JYQQi2d4wAM!iQ8X&i$w@F)9>xFkqH!7S4u@4T8gBN+)Pxma+-{ zSgJBbtWp{u;>NwMWG49fHw*-C<jfRyQmUe~Fi4iuoA6 zK?qxLkB~&`XoI@#{iK#gSsI(phDn-W<_AG!LX1%jFE9!6CsP$+2eciB%Ij~2fw4HF z4B8A98dVbdJ+Lag&g@h(?dv9yo932R7YDZm$=yYeaqJ%Il^0daYS+7BzmUu{43!(deu~LO%ga~yt+If}PcO~LnaL}D6qX}F?bO)Mgf1l0G zwCWsb>l9TDw!qa0U~AKq07n@xV?51qTl5|WsLVEwFy~U{VRXuhRRhU9pva}2=gIj< za4fJl9zslSd-|To2};fPIcq|ixF4vm%yt{gFn-z}`PQWssrgtO)Yn$NWOTZlLru1} z6@3&+b_K90n56-N|MC1Z>?q2mgIjyX4^_jWpoq2N!pKiDcV`uRU=tir?0J;Vh>D+zz~um`!-p%JUadl==nn$INtA4-t1>*sCkCZVM4C<8#L<5(*`j9NPnh*~xW47I{62~qY^_L?7+SU~ zX}!On`lDkE0!tfY%xeFY$T~HJS5?@28Y4r6?}pEv^gm4k=l*UZP*WBz)ifXiF3I}P<#3M`f^`5AAqZs zXj960(q-&e4Z%MQ(Zm<^mlG!{Nz;{HuN)BrFJSoXw2d}cMblkrgceF(80m7K!HUr$ z9>z2b&6hqjjbN`p9@xr8}|!7RprR}!ZE z%}^olC7zw6LbIjl0q*kdglj^T5J+W%FaoeL_{T0%2FgC=I-G8X(2EBo^9#oF9IhhV zG-jTulJ=FO3#v8|uE_8>7QJqQm_sRvmLLCsL&Hm8PH|xP?h~?T;uuC~FJ8MZ=%bBR zWvMx$a0&~PiHD@2P8&bNwLAJrmU@IzUd(Mq6t#~SqAs!XiLW78ivK19ESNYs`_52W zJ7S{JYpYC3%IQaJvBL|yf`ST;g;CAiHn?p=x$ayMPqAA{qDMg(46#_ZSi)~+Zl=d9 z!TTSzq+XagDvxr{G<5kfN%)?$_GmK-kV;830bDQIETiKWt&QcRR;O($ttzS}yw|p^ z?jB+Dv}5PN6D>Cn_*=rJj`GFso#GW>BCzLEIM@~jmQEfQjQT|foGkSvI#$BU|mdd^$%!aNemqj z-2MQT7%coR(dcKyL*=Co`z6ViD3scg6ckH=e}J|qCFprUay;~e zR?kFDiw0_6t}45w4_@bwIfVg)^w8ViTy}vk^PI0dwGMgbAZf5kDDjA7tushWW@HTE zFb1aG#Y8kjSfgsAn%>OltYZr$aSxH-O(1b$RHaiJV=ZsRfhoEkYqvc+gx<|Gpxas0|K=(H{q#A9BOk%IM-hZk znlyFiocX`F20Q>WSweEH2$+SP|2mrjvR1XwdHY%Pi(fDRKhehfsa^M>t@d^zhfd;F zZ^4i$}THpoO} zn`6h|5S($3=^mrD=seC(niv>PhIi0NG2D+OsKpS1PCIf!K}OWhK)6olKT(8X%+e0Q zpE=)lNDW=kmU>~ZkcU(Uko5jNg1ma;O0u-mhXnfo0qPbK>Fu<=bAi_>8$h|UEFXi# z%JxF+1@u8V(rTi9GgU}>i95OT?%Cm0kjLuA5z1eWh9Lwjq@ML~H4qH%GhuwJ?aV)1 zh15C~hp3A0eMG^g6@|R1Z zI2q@OmF3L8-2=o=3UXVV%O>kp=g0S3EPIRNXLx33dLC0#sN)w91u%tHV$4z&#*^Md zlqzg;?b(hue?FbOrbZ)7dCTIr{mQC-<`NP!;WxV^VH^Wa83m4Antdq8Zw@VqYG+`E z0x!^2^+tlh~KR!RZ{iTk^ 
z?IXALAbLwWKbD)96;lU-;@FNzwwj8~b(n4Uk+v2FwjxiC3s8OT?**z3ZeB%HS7THA_p` zl=V3x#s0wrDz3Q3bnZ7FPq>(7D&r?Xyg`DS<@KN&E#%gVUe0U?eA>c}IlvS_^G_wp zoc?Uo7*Ypq?Gr<(L3HpQ@J7LEen(W^C57|fj68C%8ua8Y2=D$aMd}npS!kGut}Nnw zk|}jFqJt&_4TxmBC^`FXUlU^F$pM^tU$HSM2nd;>VV#_@~eff$Z9Z93~>FK8~Wa(zT=Bj~H?dFg)Q}U8L%xHGNT#q`! zz`42Y*B2)f@Vmrj870Y5D7fChBk0)5YZA0MX*Q-4*~-(Ou-LFvO7}ac+Hwp|vV9KCOD#ZITadB+=HMY;DvZgLZ$R8$5xn(4Pil<$)^%- zRN^FHPgyq1<;wHk&%|49hi`{@=xx8BvhBo*wERH8;0uy8joMajP9AOT-eB(a`^yoX zBMJ7VG-5ZWK0iC+&cTcCyd=_6LeH(4`*|Km$z1hBdmQ$#VLYDDr9#?R^>p4eVadx# zOFr6h^y19RXT2FL)_sL4;}g4OWIzM-ygDGzde->;gh!j0=5OR3lB4< zE2N0olcrXTGfJTbXror;yij-l!Wpm4$&JtT^9B$7^SJxTUM+chY@w zua--`pb_6$A$;eE&Vz~o0Z(U@;>wHnUf$}v*Jtu3W4Z05dEcwaYe`xeuaL;Ny9;Z} z1rs?M=2;Gk#wL^kl9@lu_dcEWxtcRFj&dt$cnVk9ByHV6Ou2nKKXlY-g)jdD;qVtp>hNb(-luPq8$IX8A89J` z(mu3g6bm0F4U;VATU46guX;wRyBhpvuW<4-8I$v0K%lWEI8Z15?b(tOBSqB~nSq@6 z&L=nC;n<|I{Mb(R)|n%rS+gL5jU7JpRe zT1XT9cOE(q2|7J>z+1?tRM?h%^x=;h&{QrHvQjX$GyQgBr^k^9Mx?uL@J?D0XG+G8Y}%`Qc%`d9N>1gAAZ{7iWKUcmWZc zyZquZOX`ai@~_!4c4r_a9hNoV``}}cg|!vVU&~cBO0-8k#yeY4e?jep;L-7H`Qfw9 z{E<><@R-@Hz?2ZX#|KmK^AXK&;`pz#Rr4_Cp_rYm7ZiVLDP0fY_3S97 z?MIVYl;Kc!RzgaprK^{52^*1gZW~UC6-?=&BA!aB2#{<+%i}vyIoF=f`7`y(Dg9p4 z#MNiG6)v|G3-O&fG1((fUK_vut&@_#x;%Q_7Jq}S-EcnTi%>_#&6|G_nY*hr9=OK| z6>vMBu1sp4cGT%rz>2q$lgy_~?^nskJIKA$614kcUd)}O%xJ)SpWqes4ifUXR_i@; z-VdGO$_E>pMm9E>OE6)`qa7L8A^kK3aZ_Pk53n(O@Bhh>zJeYIKOM>scnp4b*ajS! 
zA%U=G>q*F#k7?p_7^vX*1-|uAVii;Bo~?rw-0ST5YYOGN*eCN`*GTYM?W*T|?T+gv zjM5KJ2vST`n=?U+(;1c3@jicj_L@^rMZ$cGpm^}t5BDQOR2YibxUFsRj3(<`iu(s+ zoNsLJU%F~yFcuPR{Mjt?7Y`+e@aWV3obT}Gk$=9YB44W+F5C%+%b9ObE<1p}ajNxm z3PUZr;=sCZ(D-h#hj5u%T~TF-cvXpb%s_JT1@>23Gc2H|?$E)d?Dq{K1=fEfh3D)| z+-%-s7bR_ySKX5icKuH+K(s^s;zHi?GQZA9wRr#FxpW8AqO$+}I9;w3@4TP^ zR28iRc45y#<~I4Mc#C1#g^TGL&Zvd}QvCp0G;%0hsyxnJ<8LL)3q*QdoB-wS>XKV{ ziFhA+>P_78U?ii$X3L&@%eFce3-#P0*YhI?X#yW;syam*TpmJmDRE+7*(5sSVs7`m zxQ)2>**0Ue+t}6R2+|7X>-#K3t5fORIz5U>I-6rD?e$*9|DSLOzUUS=@gLV_8@8Wn z?N)F#uWwG2`}K)985$Gv<{{OcK{lXaq22tu28ZVcm4QFX%aPqZ7@8E;9!9k^r&y@= zUfWcj^szpzKmK~$p3X6+-p~1M$^(lVFWs-4hM)`qm`v-zn@_}B&|z1xS-PEp}i@CE{a#r*~+71BFEw=U74Im1!i4;vx2wc4A$3P z2dS|B128x3qJ}5~e>r#N^>ILfl@WT#iy>ZyTzjflJYCwbbi#~SX04y9?Ze5)n`L^A zJ-qme#~1&PU0KdsEhRSR?MAao-S5c;Y+Q+DojPN_R#qYCrt@U^FGW13_XG8@PhT3# z-=~nPGMbk&k?IkhmS7`7@xf^9lRlk5Zbc!=izBB?Y_m-QaYIm1eF zB{3;`D{bwqP{HPGJgVLmB-$plyw5wJu)Kxy@hw(<${wx&QtpDx5HodqEu$`t;Xr}BkhZj zah#9?Gv)YF#lWr6LBk(M`1F(#ZAR1sf{N#f`LV->2Ve<5yFzb#eW&-(zA`#;|FG^0 zLx&$;B)h6KeKk+;MM{Ow49UJwsX2ulN>~hGm?S$tF8ry)1rn3OWpS)dA*Ekr<#FiuXYlcK&PFXlUF|97TXPg#*w&y}x)* zm`uNZf3f@k(#rh!o7E2J{)GR}=L5PtZ)3(w2b|SQrSEZv%2|td&Fc0u0>;EY zZs-H{;q#vGDcv6bPVah6mK#67VYO*aFPn~PZ54g_);h4Q$0_a*#YJM`kKz}W;33Kc z%XPDk@;KjYy8eKNK_Svyo7(x2vyfX#OhK9{sk)`npU!J)pQP#f30S}~D`qIXWw05} zCg7yU>HIYYy_bdyzRDilMLD_08T!OMr1LGmhtXItSilUo(wAU|3Aje_eD%n3)xj{S zmYZ}Q0v_I*3WpT#8GW69I^WU|9`E*T{+6N*qX$dT(Tehr0C|tT^cI5uN%Oz{9FtST z;9|U04`3kimNOoESlagSsyp*iJ$O@Cg2jvS%mX+E1gOXI73+hpCyWda*VbfhuME4L z+0`;4ed9&Q23?IBvZ<2r3uorZ*v*}EXX9KA*%IuuIf}VmO-2117Kl?iz3bo4)xVDG zY+P*<En1Qf5|{_RilbACIteN~q%L+u6`EV`OKZHELHaPKf+;wYN*uwHjl> zhw)Op4HzIKhQLE&rrZtLuJ#=m;c>_qLkQW_PO$a$x@M89NKQ)?yw13Cj7{U$b>ev0B*R)Os(^-h^69U7K%Re_TyU189y6@PI?GRR% z`d247&FI`C%j&GoJ7dn*1%>xA|Ky6O=Jj=XV%Ysg!cGogAsvfY(9l0h&~u1rP-at~ zq}ouo>YsmG1(?^C1OO%5TJ@U$jX(RyioPXmc&hz)k9V{8x5mm_zu!D>53wKHe6F*Q zC#;)3?=ognJR2e8?BS^5Nq(%3Bhok99xg|TyT2=(7Y%rK??D}-kM?uokp2EQ?sv}T 
z9)X8NMREy`UKn0~;~Dn*4Tv#ILRJ3Q)8X%78z!i{%%0)Ullg0zNlTw~SKy}O?8|)f zM>F=!?Maz$!JAe0Ek1dzwC!s%)F4@oUPGlu1Vu>9FB&78`Tu`H@6x%QIf$5rp_@R_ zIsHq~QHgh$i)!|18?0%!Fvs0m7CvLjxc!gY#Z!6CGKy|)p_KyJePk;=3+c{pGhZ5q z+1ShHJqIt=yU&Z74wU`8`II+LkT-9v?7{j620n$d4K8z-f#Bn;+re|rz&(|A!5!wg z{_l0joD}!yiYRGxJvp3?q|8x~U;ko(Hg{?5=L%uZyw5>nudt+roR!#-fcJkjE7}%> z?T5#=6j%$K1*}e1u0t8PVTzDvgVxwae4`J)&Kn5>4tt4uF`*krl~wOuVL(Esu= zpEBNqnCkz0KDD9cCH{sRb&@aIr-1eQhxu_da9M%(5bu$ZnAjaBYE#eTqN&N3P~r3S zj9YfjMNK06fQtQCB)?9m0jTaWy#unT0#7T_BIvM1dd8!)`*HD+7BbLACR7t@e4^{hnk%KE`c-!2j8v0WW(+wkSwd!3v}#K0r1_W&RKSBg^q! zyYpbRM^EdITVt(v?t5J)rEtfdWq4(dcXt+OPn+&cz#9x5h+2tlY>REwiS6yH43e`h zt$yDk)KYVtzCS+}moq81Q@)Ew|Mwo@nC&5Q$hijas7eIvw&|Eqc9u$5PU6Vx;DYs7 zOCQKZo80Uf|g4_cDE5Y)~oSAN97Eyk9dYdZ|&zP%WaK6$wEi6uZjO3 zf@9#vCfm21ZKhK7w;@{v?IZO2(^KCY2T1ZaBa!Su=pdc5qfzgPcFXSyv3ufD=L-*& zJ0y)Q6x6Sp+1i&ET=bFhMd&9KSc#W_1sTrkL1w}T}i5f~$D|U7&js8DH zcGx~5mHjqsRV>_xlw5h+n<5T{GJs`Ra`bCTqCMLmi5$_{HZ)+jtSPA0h6WrThY~Ha zz8={~kK8Ni67-}sZHBa!WyxOQ)55dTcu_VhnZ@Wdw6Fc=7PE7~7T6Xyx)u#z{QYou zB8bX_Y_4Mrea(yL;`GzQVNx@x@LP0N8jqG**8GJJxoTWpG=D#E?7qhAj~!JBmrbX_ zAz<7?{N27A&vLS*^%zvu{NIW=Yzw=30K1yM3)IoIsMO5bA9n=xYy3+tR(fd>*^Qrn zyJ9o{SMyOk_3z|ow{XKfJ%`FsBKE)ozUkpL>hD$M<^J4!H%o`D=Gx7(b(OrY3-sGs z%CiS1-|rH)|Jj@mO$$N(2F)v8D(1fukiSLhZ}d-H0-j5sRi{ zaT&;w8jrDC-m4k7oFgOiGN$qUp@ke_xgJ)(geb@uEFDj#RRKtA>}N{pb?tdr@nSK| zrMcgN4k*(9o@^BfATn}=yPNHPV))HBWZ@XGTZEA;z*NoH>n0gpnV@ZucH8-I?s1+( z%I8~Smkvd~#PTqU-?4R?J+%Lk&?%;iH0H-#Z`%f8T_yG}Xh%t8pLoMjzxr`tAWCn} zY_N4@`>k7BcfH_*bIy_3?91D~#zOBeq|Jb+(Ce{Q6kc@66JBuRcZ;WtyF-IuTuZOocAmvDd^$&L=wZ#`Wk(QKx9lt?BsajO~W>F!RInbs|@t1}M*p z2P_37BY#>(k7<%N4!dw>q>uc{Ub2rF+%OdTB!A^pd%cc=^Gh{rN{4r`-S>L4Txzyj zm_erpxGxD-zdqKr#3&HJmnDW2IY;9G{+hs=_jV0@7s~TNT^~>AK0EZ5+!HwRxexEGsYHvj5Es`kk4D(NkV@Qjf&FnvYzVhjB_v+4$3wOs< z{;7R5S@E^gm7jH!YYZ_n8u2^XJ*?#?vsw>x^IE$7mHXKxsnA_hLcgiQva@zx^bpI# zq8Ark3IyenJglo+U8fdA+1`n~ITdJ`z9Vm&7RLsZFijKlU%n@xNBfuCMPU zkx_+|Tl7f@x@p66T+ZM&fM>W?sB+B~Z{M(*d1-+hCg5~>56N9VT_T#X 
zx1Cr2wpw=AdOq3o^hPCm=gpJVay$N3%QD|pT9G9ytYygboA`a<&nUx&7GFLIJ(eFH zmHO!@dg5(zT-xh}#P|5^m;2^&1bq(|q!Pu0p#W+7j^~71ODf-Gjjk!Jx11O9-$Orcave7^#8E+ol#A8O|&2auOgzN z6cMpdlrBhbiVD&d2rV>`A}!R=n;;@8pux}s76b`|UPBiFL+>pF5CWlv5(pUTed2fT zUGjY|%O(8CT4&CjnK^s*o+pq0BOOQTTjb3E4JGhm<{|itA7n*XDeH92ryG8B!+7;9!zvro=9voU7&IUkLX8ZnLF zF8i@^3|24j2Kj#KMOC%7-R|7$1E_5cFu z6)7*qq_~jP3y&P~zW|62_?+Q$rXk%7voC&P=Lyg0C3cO|j14z~O4Fz0;`a}2>alzf zT%GcXseFm`^@woh7^7&jV@{$~mhJHPI z?cGAFe(t_=%$OLyOCm2_-eP8IV3JwqZmtV5HS6TA$ACrkn;&4KuA-&KbA*tsgVlYn zGkK?F9rLH~qZd_V!0wyojbC)SCye=9gpw+hAozv7w}BLfUjlX&mXV=)cRKVm>Z!>V%hR`@1fXl0g3a- z&M~h{(cK9XDSEv{9rE$kt_x|hVD~=rMvW5sTm@mn>gv3?TWhy@MvBp>$DPC^eRT1^ zS4KFX_SXKOE)E6gf;HYhaAys}#rBM*LIQStHVG|8D;u1u2r`~oepptF>v+ure zAoMK(7SO$tr;^dn1-J))Q~rz`>@+V8b>!VNC7Yg1Pg8hfW+{D(Lu`i;kA1i0sFken z{Pe(L*i*vE*i>hDIkyV9P!VLD@`|otf(0gdM*xW@XQtpL2Jp4WC^9 zA;7K-1lRvsanTS!PjNz27VBP^L0GCMfVi7Uzal>Sb^19vzsjWZ{Dn;Z8Z}@XbnbWL zxasCYg5D{$=Uff8FDI^Ar6IBQ#Nox8<-^Heju=-b(<3W>djkEeb7{21F8 z>LhM~8g<@1v@-j{U;VcYnUR7{5hV%Wh-N6MY34PM7rzJqpVmhc*y)%2e|gxu3NJE0 zdHzaCmVEvBw`ifFZ$fa6fb5OC*UK#32Mq4gcD#ReY%UCAmj1c0*>k|Wk=34o5P5k2 zCDvZ!BUnHaS?jBeC;KI+S7Ua26PMe1ic*stxF`vC+vJHx6$4#wwj=mIW|m z;SJf_Cr`7O;Dk*kUPQr{wJ7}5W}Z#R7)v3`d+CAf!3CPg;} z{Hj-Ia&(GzP;;ERH2&f`5L&N32agK6Eo~~kZ}+2LH`;2#IER5i(+T#eTtd`=rT;ll z?>oZo1_et|s$!t0$9~^(_W{D$&;AdmQacS2%5wU0z08;We0_q@z4uciRpF8jCw0?> zWJAiW{pslyrL6r0O)jq``E9P-Yq%hrvLrlwl|0myJOXP$dqYC8Nf8@afj1t}``G>V zNw{4dins`2enTfzCODddd=LQuBG+o~-Fd@* zPlLH`APwk%G-!u!sz+mc8;TmZ{htDxn}s%xVcr)R9$sAAgjo41HJVD&6Lf}Uy|PmJ z%ISJP0`?n4U+QPs-v`wDay6g>oCaFX{Nj{81l{`UXxKsS&9Mp`XWgd@!6sXo2hysv zBc&jC>;1+DGj=N+Ow!=qd%drGQF+ovkEauUkAJ6vhW~LX_dmeVivc&Is4>mqP2X3i zl>k$DnUl3xUQd5t??W?c(9~?b!$d3&6nve+Lo8Q;y4RhKT>r5U{#D5w34K;F)w2*T zQ1J$r_4)@|Z8}SBx`o;o`)cB1(}eu`Hm7uGmXXjy?^1bm2>hbwQ{^S(z!0tfMKCBD zJ8!)wyVI9{t>{a*f!DQQuU}vO?ab!@0*%Z{b>OxqV^S31*lIVxfS{>?8E9S^;Par? 
zsGd`{hHF>`Hsxp_l`dmCGe3`;E)V9^qIc*zpC;G8v$1lGO7}!vk6uzwfq$q{Ofilf zp6|{WsUx7W+XmeM!HwNk2BZP)Ky!_hZeWO_S1K40;5s{FHM;LV(>kA2&&bP_yf zgRCT4ipS>H7_t+jIeZ%Z@96~X01W1$8Z2)uR45PdLy7{_8QIKBqUK7~pKnxMFsU9H zmP(oOkC(DkeEjsm#k^xEyBr|M==D-w-p9NQm>%opACAdvYqN+=6_NF|7Z;%Q8AMMC z1^jepbGrG3FTyxOA>G>GePwER^%Wfa9GAi4ad_XdMYT`oIJs&8&KkF+Vp(ZFxAyr4 zo8h#yMZ^8~Ci2#Vc)HnpX3AVbDMS~xg;=NmtA8Jj3Fvpg`0If2TF3H_14E_{8Do#@ z(n7j`X%=95ci6k>1)K<|09Fz(o1DodUi4w0UE+7hl2UC`PB~T(#7o z(Ayr)GY#N0OC{yXc$(_!Jrv62`^cGb_Cat$X82G$K7A+?)KbUU$97{yCv*2@!&tH)USAzPmPJXtWWiB-uaS zx=MY6(trQU(FD3bE7NJ|pt^gZA1Y}T*{NY&RUqof)}4_@4yt~Y8CmTUT=!9e2F=n!ks)H}lGsPSsn9)>&bW*5{HQia*Z0P1_7P=|fk2l^AX#bkkL* z(VKEaNg#NFKYVo0`kJpnTie@#%%QXIQ7`>pOdU#bJ-Kl#N?dG<=oX)iO6cDjK#8ZV&zs|+c;>WqcjIusO`AP5I)T$6R6buP>TS1V(jlKVJZ#;ij;)JN6c@WBNqN_)9LwGZuwD zUcEOcXXpvSLr_I8WDB@rP{bV@2Rt7nmLg0kAwmyU8s*uE`2dH!{z^5Y>DYn0Is$}) z$Sr*)a<1^Vb}+AW8`Lg+bP9=6k5k$jxd1x#p`YE}IF1g?F5gvNe7GB>e&gF`wb64G zidnA>G5&cUD{cP0^lY^P$xs~tj66e&Y&d8W(&-ciNThlJaqS2n|+9 zuwlV4dFqDN_=m>3olYLw)eHY`7NF(k=MX=s>$3^cV%{&fIW@1aH}##Yi@~Lbl#J;2 z1{a3u!2gvH4=@Bq4j%dGUDgF27Z7LZ?&hXWN8B>6bh4LQLp5{kIvUUelRplw{Wij# zlUt&R;`@ioINwrRM^mw>$$Vsd_rkbj!#m!GXU;QJT@$?KdG`4EGxS;D2el_T-hJc9 z4-da`7ytT&kl_4njXQ=7-WSgQ2n!3l_PF7RT2gHG%n4B0Do^K6aqanjLhcw8neKqH zj7LxIr&~+go#~mi1DzI(#!JYGj&5pdu_&Ol*{0b)7;TWT&^wF%nks< zs`|=!e30nXvG zBhQo4pp_Q3%pnI*^{kV`Gk>p0WBwqFYvw01-F)>K`Z@C%pp}mean2Dq!KgNVfXh{g zuPL_eG(srO-B*~bnchn8R8ah3H;^A$*_%c|Yeshz$&Z#$1iyT=oOFpCyv0et(N9+o z<>EJ!UW~c&scV`yE!I@rxo~8~d^KRjwzhP0H#cC#-&?#-Ng(&I#+_fRt{P{>7tK(^ z*;BPTxw@BqKO@{spw4=nQBi(3t6VGE%!x)B_J$gBOv2lOgZZ7tob-bO_Cn`>Ds!Ev zW}3am19iNNokV04G$T%z{jw|s zFzpHG8CW(gc&sTww*s~p!{@BL<}I|XFq4q(8C$wIaIeP6yvBaNFRwWyn%;^i`Y7{k z+(=oS@8m=oyRI)XPGE4@t$boL$u@y_^Fw4%rbXr{!ZRhPrfq}~RJocWc*Fs~d^{=G zM%txzfkFEF$qbj!L2o*)HCG)OnnIA$)_n)<>D$)MsTqg>KNuGr>6H0(78EH2rf#M2O$ zjc(^Cmu@S8W_f}V^*?KX=r45cFPe9 zy#5bz0s;dtGrFacmmkZ$h26Usq`JqGds*Fa!lwStd*p<~Rmg66LC|bn?NYteV3$Ds 
znThg8#Rn6X7^lu0*s@@ubnM97OQ)~nF}*o9@V^(P;r~D*!ygG(9-Teb1TEyh9b-N{^hC|~mBA0Chi?hNOC;Ony5)xk|z1C1dqk$SYU!MAgN z+tG0U+*Av1(U<^O1!Hv^_k%$(bAf@TrP()4Ikj(NjQ$Bk{8`+XR~}qYOa!#rm<_HP zB=%+g#}N>uy%DoYJo?dRGPJRmsxIG+Nu%GO5|w0wf_DO z5EZ86y$AiwE)A+LH)p-VG&bl2x?fi9$BN6`$k$9aMh>y0yJQ<6In3V(VIx>Br6A+o zOxAC-Ju{h4tRm+iyYjC^N-tbx67XG_tUlmE7!hhgBw~}|)_!+s%OYR@$}#5|Klu5d z;WL-u0q=6`R*#flmn0xM;PCDX8p;FStXyk_K(=x?sQiu;ub8|r$MC?s*wyEPMJ3!Rwxx&7oIEc>2f~+fF0|iac16h?Is* zmP=NQ)I03!)XRepXbFROGFREIF2)&3B0_cGyD}bo$@2bvo(dHlO;r;^Vg`i!&TD}}4scD& z-1r66uSX0ByCv`=hL^I-tDXX}p}RccO-BS&TcBeLixp+irtL$f7+gz-Z(!1KPB^n_zP<+N=*w9RHGxw|X^~()kw0A+>Ft0q3kaYT6YdaMQkISYTejal2CCfv;P`0luG9Wu6_T4>ES> zR;U>Av0NIuRge!7*jDOqE4W0&v*Ffjl{?wYxUyXX@0FkoUx7iBm5=!THYKcxuYtKs z;)J=KP-G=hg9L;HZG&lj{SAwg`5=&${^t&vf=W9FS!L{HmseQ2uF}5C^JlGR%rxe- zj;nwMfpVQc+#IDcK3eZf9L+Iv(<%1tX|0y_wLp=C)D`?UF+Ovz>8E0{j9}-m(@wTm zSlYFb9p`0nAVk4`-DqL!fF*T5Z@a_m9~MtgQ{*R*B2#&*O8(Ll(*+K@GYC8_mpf)c zC|7XM*R5PPTX&_-sg$Dz0uy3PDlA831TAsKWxUO5mf!?ezcH*j%>tGW{-Q8(Q0}IO z%;w|ViTtJO3<^0gQP5MKz~~6vsIRzRH@SmP%YR>3t9@f8n91-+#&T#K8=ulj{wKdU z4R4_%Q$)+6A<^&6N)tpqqC@WIq_*Z0n%iN{kB70oyvbuOhyt?$31cn+hSg$S#NCzg z$q+Pr;TJ)-QO@)R>>hYk@Dbij$*H>yM{vlVh{tm=Lf<+Y3}SFbxr#H-1oXfw_^qXQ z@<~T2sCjrrI#auON1gxVaA~O~)$y%WQ|x5ES!gOgE8<(I zs~MEUECfpTDvcMoyow*fVbE;k_?oBX8QCj~Y&pZC`jKAHc!{MlBpX3g@bM8CYi<6I zdxW!tL?s$^ema3S?irevrVGc@xy>&``y$$?3|al*n^%-n#nP$PY(|dhEuw>2GRoo@ zhXvDq=kPZeF13L4fO_SX0SKwz092o7Si>eH488r4G^#jdqM z`H%dtpmO$qPTvA6+Ee#@?VO-{ljzQ4TKO zahK<*o0_GxGRv$6P==yi$SP2KX5Pp5R+!vqD#SrnCpt1he{aEw3gseuruw)7ZS`T| zcS{m})+fZNDjw`Du(Y-02O3VXWOX(0w$``YHR6E`Y1%!<4Yko(m`1kDR{ zM;zA@=QiM_a|px7Q`2%=&+`e81CLq`46IIh?>6~Iv%BadE-Q`d_KmtAl+?nmQhpkK zI-cw7H??4t^^{w3hi%tx$T4>i3#|+FnOr8VBrgnE2~e`Gb(%-%A`~|bh3W%r1$`}2 z^Ndxqk@5vr^Rv;_)&AHZ6`^}0=_|!Uq-hkb6&T{$orM^N3o`s|TOr&~Wb$2v*Ax9p zX*VaJ2brfjj+N%ac27Umy3|qC1wrxSwG$m#bmK5vNAW7NH9Lm0)8##Sb>z3LI^{1T zRj>ExI&V|Vx2B*phEQecurn2bs8R`5=OomVqJa=Fs9JhQ{c^c?K<{KY%2Sk(4q|mB(H#?Ytm}(j=?{xr5<956K$r5j+j`rK`o_ilV2n-C 
zt#3XVORF`%*P^gm8zluAaJL)KO=mRL3GTp6#FyhRDL;XaZ}clvQ$oG=2blNfC84Aj zGsNYDTjSr}1b|k`%{C?HGm*+Zn29njxbVrp%@G#_0Gw(70N&0bvbd~mYj0v$sW3)V zg9P*gsgUC|NiL|6+>EWcrBnQ7Om)vhpc4v`NvAppuB&s?Wkx<^m=9_4dT;kA?>bVM zYja3f0%V*F{FlM3l7fn_LbsVCI=17R(JsG>;r8W765CuwZi-Nv%9p)pN&2KDAn;5=&k+OX{myCPqm_FL<4xxVu5esl4BOh;yZ|RidBH)NK!Y(r}RV=;zo+axUHve#wn5G5g?qJ*pnR zUAlx1xAcs+gad~U=|y?AuZ+6O$g(r-<2&ptR*>k%GZp)RMm&!|C<27^aCq@g0o8>_ zJXtzGdDYg0e9FiQS?2PMS5I>yI1krkUVbp?g@lKuFA(mJ#yONbl_}S`b&n|l9nC@1 zXZVqd@bV#Gb~<2ocWy};aJPH_-h+| z$P&J1EI=vHMQmG&Ot}_i1kDD(u_)uJnE;`Rd82i~Xy=kg#&REYx__)J?Z#2NX**Dv zW&AmfjA8dwvY?(N#33g=jiw?Vgh&)4lMa&Yw~7(9_^a7iN&-9~*zSeMAP9gnh{=&5 zAqzATxwyK$YH@g=#iF}!xiEbscp~^4nA^@ww~+naU~&9a7X2}4Uvy#W0u=bmT^btx z5v~M>>fP;pH*{CAgC)a!%tmvQi&`JXh*Xy>znYp8zgYs5U3*kpvomTOTGpn`FR=D{ zsbWM--+man9~C8nNJnI(2L!{24`RKgsF;?u6`P#?ksM@N-NjVl>t(kNh(tbj#r%5~ zd{)Z(NcygCexnDC4(z^mDLAyB+L5Ypnw%gP$U_6jj&90jvdRnJj@S=;pAT~*v%e0| zl=iT+q8Vr|8i)FnT*!B6r+p%>ERH)={Wgl18`PXo0u!3C0hC@PT#ZXkDvKU7??9AD z)}zfn=*!B^Zr$1&nBntsZG%BLC%C|+Rc*sCY+%-||4!F0#!6f%c(ygRVq@>RK~)@0 zg$SWnz^uyDSKLdVPo+#;Wy1luDkYWP9f@X|^G0PqHtyWaismAXWpRys1oKnMsWb|| z1v$ObsM7ou7hT~vas#YYn}y{#kqaW_kIL5ea0a zDVu#T#wHbCk}hs1C7rU?ZkCGT52XLGGA1Zgyky{Uj-&VjM~i z4FRAE6Bwh_Zg}`PFwfAxBf9HH&A@_yf3uBPXrcJWJ>6;D# zHAJE@(0K~7)X3k*{NM& zm0+uvyy$=fLUxA#MuV#qG`+qy)whG$fnRK%FHT{it0dOPll_~f=tMjz$H*}JWP?w(?}WnzIN-(Y4we!7h!vAmX$K&vG|cq4O*Vd|Eg~ z;G1LIEawSH*|pbIg5z;cY^G2B)Ug09X*awwSWuC z_30Wy)62dj>aoZ^ads1J5Ko|<75m_EUS~X#jYRK zj}-k%HUMs|X>$@5bu?oCIPwz!X~Iqz5n@tBJdp4pn+95GmtX;;e0|ACdpB`;4p#!* z@qEQF9Y@%uH2L3!%s3VGOd0Mw4POVI`9LE{@efY&1C&u5;u+8lajqDE5QOFV*%G?C z1Kh5*P`C0J1~FWFG1b=B%x%^o4qEJ5m{V1uk-I;twUs|%^WWxO1kU*qe~cGY9^(vP zn}`(zzUx1Jl=lbHg(vB<3-TW@wZc&rEO2{db1OgWnHtBsj6)YDqf6>cUzLZDV^V|2 zjQ6(4N6m`M-a=Nj(wqfu0HjSXsZH!YCEnl)fIt5##38g8F*??w;kg8XI{<{q=aoAw zB?FpEtkfiS{4cv+(a3OQ>Gm@heyx$fJf#{Iat#xA8xU>8{TGo3QG%%~Q4~UQNZqb? 
z^&R!M@nqft^Tk;=`-+gClUslANl}LI5Jt7WO&gXhpNxt(LKsqPj`@pe)XVP<4+@&* zz~IP()p`{JJ^?UP*ATNHcr&@z&=?bywOd8ul$3+CMOMyAYmjk~$k{DtP~B72d|DiD zeH|)MPls8wfp{8VH#YApD15c_WDeW~g{+$hh)oQ&GiEcEGbjMb-&3cwv@PrYY>|f% zdQ_;f_IpuTboTHaelmXi698STLtz;jo|Ow~G+`LhVnlxwkdZ3yZ5<_{qN}`#WlQ6E z0`4zMPM0m#JZreC{?(#KOLWnwN~54HCJ+P}(DET8mluucf^QH7%FN%r=BZ{wvSd7N|~ z$m8F@$vrO6B;~oF$~IUvLX>ESA|^-JBLAYfyVp&L;G^FDopz>@>hxkKTP>Vl?iT}CBL;e zbaUACUkVDnQntMb6A<(DED@;@>wQ}QcAag(35tzRbT&5P=Z%0as{PAW9&TAGDvN~~ ze}pY+xPNwvp;O%7Wq=UB=h&~~($g8QSPGDyVVM5hVU>586E2ABL_A%8#EHr$&c7SH zRl2!8(Nr)6U?OxYNSj{}HJfXOA$#`)CTse}`e-4AkK$T(Tn8~MX_vH}2EI}Jvb$D= zw;;b|(xboy#{MmjX(4I#soTiCzFysxO6fwW+gM$)*dMs_?I_%d2hjWN)=dxPkctI{ zuBP}D2n|h=Vu4&~j}B?^T-kw>(JO4`&Xl(bHQ69da`(0N@wC}s%Ip`!-q`ybMUtUH z#e&ggK~65F>&2ZR?V3U|$zfAPtRTc;2}GBRb&w-T1EeKvj^!=OV z??N*c36a6H~gRpV~KnYrRB`^zKq46f_|IGpr zfcE!hs{-)CMpc#@V7p~{Z3`D)TK+2Ri<#rxGIgmzJxy7nEUQz$t zXq6w8PQn%qt9WiNR6RF5f?ir+Kvibuf4@O~eg2>8oW-jTJyZVx)pjcO&sicQd?R;_ zh9u;YKFPdIe~Oat#!htRTvaLR9QYGepPLYhJGKfki!q@|=gsXt{YvZbK`2gj{I1WU zXlHkE5!y4B12Y_Fqx4E^=WRYh`cc$C9RFrqvk5$8O8!0#`a31Vig+8C)uJ5nK^6&k z5rCY!i{jOD1&9pmYqfdz-;)LX+iAkxI7DiB#m0u(KW(kWjshZ+0>IXDui)QjkC`GP zD{lF-UF`a~H&!NCy)&Z}h~pIlFMjn_#t#=5Qa;5K-)@gSH8k%jAiYsnsCrQG`*mQ) zfl{uFsKpW1fNJEMP|?+r`t5s)yTt*MIik2og(#wyn7Nnxd#jLkl@I z2n4Z1T$$Gr`MXd7^7Zk)2)NaGxqNeu3bOc1DLbfhh4Q6Idxx_a$9dm2E(#fZaQ)KoC-sgP+! 
zMSe`|e*$CyzJiTUyYYC5Sh8>43lVpH^;{O{X_W=k~v#WAA7MfoIyc7%&qvfcIls3YP^2oxrxL#y!&3qkv?g*IJ+5^(K7}m)e2z!`_^^K zx9NEr2Ssknvih2Gulfgdd#fDETZr@;!YT}7xzYLl3mrDDB0PfcXxkMZ51>;v( ziTX#l<}j3A7}s0TevCc0Ch28Xsw|71c5x522eHi=-euIkL8j34LxGM{?D6ZfYceHv zbg}uJTOp*9c482gGw!*&D;tAC_%OiM&Ty#>>5+CKiyObF_*7nY(k1!=dVmNGdQ+SCwQ=C; zYgy2szw58CFV1KW8UL1ET}Z{s*)#d=LHDYX0M%Y!sqVRLr~9s7Yr&Ax)lZaY7CfB!y9m%Ec>~GkES~d$^gWA9mPJN`R1atmG)cmYv7LDX4o$Xi z{IKRK8IWtxSJ#v$^EZr3BABMDzojY?O%$}qzhB6H}@NF;`mM97>vHZ+&Z#!zE_LN|~qp_I(n5Q~G1T}o0lDJUNpMLPWYaUE!NAw(4 z^|hg@X9Dv%So(8FWzKe%6-FTPg$E~(>=z6Sz>Jt_3xD;F-%|qyAEMsC_8$EwzRBv@ z!|^+TT7%U!@ik9A86z*vUB3-6Xdwo9`K^e1dzKz z3G?ygMoJ|);8U3ys6xcx2!f&EIDZxL!CM&foSH)HeE#PD*=A=p+Y;JJrPQz%t!ary zB|b&n(xJO3>420ERZvLksjbd8s@i_7t|ZID#BBRSH||$?ZHoNiZG$=6i5l>1>awHD z)xaGP@B}X?$F20}bWMZN0fVjgOJ^@+;_VGOD)2VyU% z21vX8x9W-`Zz-fN{t;boO<#d!FpSylX+Ig$91~A8g$&z9&IvnSIALP5oI0DvUDzj+t(WxlpQf?{3={R$s78$avPqDm*} zU5UHnIZhL7gUlE_mhF+HyYl$+nNy>lxw;iU`%uc@&qta16o3ry6UUrtD*)6z|9~JE z)1ORKtVM@BgOT;gXb|bUYpwY$`(Jlvrh+ABRpODlD_$)#+%7{wbl|y9x43XDdT_aD zUn`V^KoU%3UNUl}c2Pb<_~>X0ha6CCf|BeRX5(N%nG5_~O{%brKNXf#GuP7*jqYmF z98w(A41os9M1jM{)tWKLu2zLvmAHasqtSkLpW~@glRO8v#CRIgdouOTpY+(}$+a{s zk_xs(_h=83TicFmt7_3eO%>x$xeM@OmO!>=xAIrn{Ye8t!>61IUVjTrN)#4T;_L0D z{SN!^_wa&z#80zoERsg%#*guS3aj0K7WMPMFb6^gJVz&`^~E3$oBtU zD-u{sB`f7F`ZfjtG9H)hVTvWxhT%ln#Nh;QhL9=Ai-;e^R-5J@uaY7@HHe^`TB(`q zJ*}DKW}RxD3v0*D6shn4{pMq91vD-Ip3HEJY+WMiJY1D`=iL*SBx9_$NaBqn$l)ym zKtH5wOGlI@;3I)G(=WsE`Tq$Mt+>v-?f!c81$3e>%z-#RXX30F9$t=&OFZF2Ip+oS z>`yPee&b2qWQWHg6#a>g-RHq4aWoi8bac@Pl{0oqSfYzOS~_Xycz?v)4fKaSrS8s5 zBOUwaP7WY9CU*}&gEbb=#@;Z8mHD}MS1Q!`!a8j+G3pen<<>2)Ov@5FEAPtGDTRl>|ab8WxizI&6%E6MB>7|c2;mlD4;=f zx)eq3j%mf|79<>%UT*_e*jp8YF7Znmq-pB*tl;>lKi7e_oBXZG)$BtNjnfJF!C9K= z6}5>f8Sx(^g%c|nS$j$c3K;a+`3F_+VW$<^>F-F*8J1CkA$1uY16xw%u@D#l%D#>l z6IH4Wk8Y5fc@jz9FK35gY0rS$c2i5E{7+?0whVTkGsLwc;`0*oaRsnRtL51XZ)!=+ z6p0KTK&f1|Zu|Zt#rejajWM4S++oM~5 zm*tOFff?Q>pXpCZ5>K`N;uV7ci+bOMSv{kcOiylE2{L7%YU$%*{nPvmhvntDwgi0I 
zU3vMB*#%qF70sTx3k6}#3#ua6dxXva!}a<<*ht%w`+EyP^(M9G{L8zG5$s7v%Jax~ z0KiTwj{(T-BNh~=thsH*4_6*eXk%JjPhy4h7m5P#7-|i1g z*M4bnb3S@#S#jstxwQvg%#esfSp&lBt)Wgg!QLu%kjsUP#Q{pa)`JLkl}RE&PmAqH z9=_BH;{rF-14Ep0vebdnCySg}O&XQ}aZYd)#Ju@)ur7(0(?SAatCtM-B~f@jfIr3P z%Vg)GxUStsCNOSpmRFruK=}qz=>fZ{uFWQ!aKmb!&H7gtqCme>i6;4 zIB=A^_BAg{H}p>9eF6HFyN|t?;|5eZ2PpR)oV&={&o8Qub3oP2pdohZGOe0*Ak;=3 zAZZ-Td}3_>@KUb z0D~;dbOKunr+s;jd)<)nL1$b;!Zuc`)}@)FlXA=vE|8lE{>aYpBKO$%=`t(!D zwZ9jm5%;TG(PK?d#NVeDaxA~U4K+WhTvJMB$o$8+jg?V(y-`3a=_Xg$e(d>X!e#vm z3n!j^m==CZ!kER~y1Um7ArEx^Qn;3vu0rmgzUATzgJaHaS`p>zOTVOcUpVca8}DEq z@NB2J_-Lp6B**Mpnl)^c_SyGOykY=))A?P+%TCRnhOd8a<897OkFL@`sY3wVTVU{c zysP!f%q!8=e-y%DbLzIKflaC^851@ewIx>kG^=exLpxQRp@+S4TvI;Z+{3^yz0Dn= zdh;5=zqc%Y{@6H8HI0t{-L98at`FX5q*FtfM#FQVaiKzhg5+I_P#yyI)IHkU?gc1( zrr?GyHE#x!K!CQq;CH;aBj%?pG#B5DMVq43GCz5_-~B{WgJQF6_1oP|4oR>2lFCZw z_hw9QWq+(mtCn;`Z4JB2I>o@NI{vI{owplET^&DL*e-R)cVOlL?u$*(`p|Y>^(c=T z7TsaWC<0gGmy;^=q6!gzSH&+_b?WSSNx`dtJW}ODB8hxh^#}*M_*mmsz13gr=3xs^Y5*5+@OUtfR-6O5uHy@gbE1z=0T3_i34RvrtDIVv2FJpQ)8I_!8Qt+9jpM-3s3^j;vi-BWp0 zs5c&sF<@}A0l94q7r(bs7&}@3^X(p@BwOHWtiduo$rs(tM%kng5{+uZv-kSD6h27E zgj1eSr4#OwFDe8OEEOul6J9Z{sf^=nWC@R6J6H$_v3HO;lvgU0`kme{glj#8l>OTj zaoJ!v3bm?z{$~kBuZ=)&Hg5$%GGNeO#126?AB>xlv z8k%A6sj1DSMRXk$7SW`)=Kgmsz$+^CH-v@1t8N0V7gR9C6gRi{o`F3(P8@368He7?|T?6)gm z9FAYUd$z=`Ey&ugcKaTblrJ&ch0Miovj@r*_O#zns}dOkK?X%w%_zeqUw*SnoL=M& zmBe-%0J>~!W7M^LfvTqGO_Ii^cJBP`Jemgg6CO_u_nYOl#uS=bSj$5e zYc(c%ZF#c67%XAOe<;>6#B(^nvZSY7K!De8*}hidliDyrj2^HoQq8dypfQ(1aDmn^ z__YfKb9#hb|E?{FuHl!8D9ain`})9Pmr&kNM&?DFQDJuyp=2?#t~c z;D%TwumiI63yURxvZ@$Q>Y{`BOaSmCLeMMhWapNb{?u8QF_-tZkiPsF=hK~+^%i$O zeDl-cNzdQ#0w$a)a(Yj?15a4kS<0Y{F&W)4L)|Sy&`J&tvOzm8fmEE61Cn7W~vEE|F zas#>((v#g{#QE7)-vR;vw`xMs8D+O#dt(O+ih~iT3A_mP6{c-8CKsZ6YV>jSohHR9YhqicbjS$`_c<&)%#? 
z`x9JL_*!-^1!b*(DuT$33*F4K&;GyF4Tlb^T}{FYO!9Gu6JV$t$ZbJMxH$6o_2ae| z_`kUyp4Hj^DeBJxfF5YwM}E-SES3JCBz|V-f1Q9aIvGz-J8K_NTPm4RHjkh+#*% z1BY%`J+Y<4{Z5mpUu(a$YJ#;LnmT(avFJ(A3AoF_T!ysWYQ_Nm`&Dy_!hk2<`LNz~ zf$(~wkHd4XG5MFGXUa`XH5MzY+pS0XR(_p^Cbk8z_w^_xVAX7CRAUaiLh;$) ztNLubvAx7di4oaX3~diO=ze+kVA3Rhzuo*kesMPz7tQqG8m=GIBWYFuWn9PYjdsT(7$&3EK0iD-G)NU1E!fkv zG_n`n>KyZu(^iPcjm{G}lrQCh5a9ik_=p)D%i!-lvL&j*}+OBMy*aTgwrjs9nH_kzMJFFDaUGnbI7Q78;2diLBjVK z#={hvFpc`RQ}Gp8!1I&wc$P8XbY$7$WnwzDwSBD=zjY??h7S64bLa3;rk{1Hpj=7C zF%f%xLm`$mIT;p93>1?BOe3GHjn8C=hVR+p(>^e!;jwqm6`b=DX6hee62O7m`3vjG zcU^(OvX+^8jH|PzXRlwp)ns3{v-IXhWN)(yYq#v}npWLmZ|8&6l*XR?*0!Q*1Ia4L zJAxaLfLcm9wa!!hrA$UDTUXmxfPKH~f6ib}i>E zfXgZnL!*u|oMpWQv7 zd>F2~_y$IDc9r3NV+M0pch2WV`A?=?U&W5_W_KixyVc=8gkUuGVMXZ5Trq*}#2}xC zOphxZ9>HYtc<>e5lya8X!FP4{hbJe*bXOKu6ash0F^CU(yj9zeR{R*1`%x5a-*+;3 zdY9#}nC;F4Mfd#pm17edIZ!!;6V{G2M9r%T}yC?pg*;m{|(b8mKc-n}rbw zK%L$hDdrR!8`VT4N!SB)r#N?_bF~J0_l|eD49SqK^(68xU z#`2)K#1MYd^pJ(dUHGLp1MuCgEF*G**wCT!Au9|%iK z?JnO_sBDuOnBiVk$K%~ya)hFFoo)ZSsAOxtrPB^>%ioq$E@$|UwIr|dti~>DV*1%u zyVNgz~T)KxlZ{qgO0?Pu&j$y&v>M#pwgK^Ai z!{$lwlhV2U868nhHwo;4)zdP5l|VoHgBbSzL)UkQHF>suM{T8jRH*f>qX^UzpjM^` z$Zkso0|Zp2Y|E07FcV~BsiIXu?E56!Wx1+`5`JDoEMDkt|xe$k8 zB83gnk62n3!VhJU*8+k#Uh`|X)i@~EpXh%j19H)K3gf)fS-fbDQ}@y`nU}GI;7p zIdxcKAF1?Il0{citG`i2AvMi3tRA{z^aA_J)ga%Vw3-nt&#~_jLbFEvg+NCvKp2cwhGYq6Zh&;Z#>AE9rVC zNZJ(q^IylzNhhB4)FPQtSh9%_j~yTT?y=X(U#tP2Yv8UO(Wlav0iB4_;zhMxug6?0)e;roa<9h#oeD~KE|735hRCq%%(|-*(BDGIZ>z%3!6$3K78WrUc zdb%gWbuZvZ&;nW*;<(NmQ6y&=aPA%>J{a#~0_7i!@hUmOHdbo$s~=c>&I(_8D?0pt zpM*E+fY#B!I0>GYp5K}DxXX$}s3NMu-CCpPa?)9)h|zxR7qI3EPKGQ!mnE>4ODYD7GYnEP0t%D7tvoBSn?TzquO;_wK$|^tkpXImn<0U z6JdKZZLUqeZ9$BJ!2MODHkB-je2zcxjs_C+i(1Qa9&+v}2Mn}}H(p@}{k65AMr_M> zK8*22fd&0@DqRxw7-z$D(54#VV(^@t0xt9+!)mdUEBmA^jz0wK%9f&z;)7FnL1nZ=F0 z5{h5Ch4g7aSXp$*d7n)*V^l_zq4+skrpe>Aol?^$b@oo5q_R3&^)zVNhx?xv!pn~g zU>yF=1g?m2s$F_e>ST~ocrVk%f7E!CWv#O~p9O%#gwKi0nI- zhy^z|1Gg~HNW?&Cls+uE_dBYZg77SKFV#62{3I0>E`_WjP9B6M49sOY8J`WcgAN&c 
zcC7L+`dzL8FDEX%3^V6HtX;9M(kYYnn*A>wct|)HukR@YKqcDfLU8@}TRiSe6QW zBY0$?yh#12wYAaz<|!<}`{fPkj$Uts8;%tTB6d->i}0 zK-G|MGlBZ|DJ!4{$%8#R_w<0H(s+u(oA9N}`mEDywvhW-{-7xADr+?q4t6V$R#S=o18>Pt0Na0Or)ddo84%R+$%f4@O9aAHO8e=BDqr}=!*OpuFjL)KFpM?!)xJ0Zv zkYaItWZ>>!CYM%6oXGCJ1sV80;*pNJo^+e!M%+rcwuk-ZW&KQXA6MTAbP9WnYL)UD=f3Shfh>T7Vy_l{xuYd@5^0L1;@q27_4) zcNdQ$5k|os^zxi)jE{wiyp%OHCAHsI!P}(zIkajei%JdvmisTN{ed7r>`aPD^T=;O4 z4*2LrzI|h*t<~K~uPU;cpFwxoku$8rGv|)@HN5M79PgUBJf;+p5?jOBbT!-K!{OjW zIt``oQSiZ^_RU*9s;DOWRq8x$)r-F0O}A5{{PiYvC);85)5c>yPZU#fIIJ&|^mi-cEHVB@OoEUIXkaS#b%Z`q z0ascT)k=!vtHbc%UCzV*Rv`OAiLoo*^e-8^ccFtQsR?Wqu-ws`14mY&G_TGQjZdmK)ZJA=2WMJD6{~Vr&ieMHxZOlFo=Y2z5v;cH$%)sM~ z=bV!VSZRwXtn<~fwv~Ehpg^-p6+39~2&OKL9P$OiWZpAiDGf8*%3~sO9hR{(d6FA|qtSuL&C z$3A&)Rz7^p9j9r|XTc{*uhU`_6s)rzvfVJza@g)gA*KD^vGGSgf*+=NY8AKezP&>x8_P#p#vq))F9CFkrXzHyFJn{X+x7v6A&va(oz!e>W-&Z#LzIznWX2tb^ZZ&0r5U~x7N zUtshX-1z=|IqT5eGj)QiPP6edI+Mt0&Wt4YU1Gd{UtrN7C#C|Pa`DDPxvUx1^iqmr z_;0j#z#R+-j~c8`*XUSUVtClqGq+~vIPNE?g#rZG453>CPq9?7Hm90Q9Jw%{MU7oi zP!d>hN7X=&k%dn5a&^rb%NXvT1E@?-ODFm05!A}a*{*%^RDi|iPi$Df)2b;hf5>1q z6mI@7kiCvP+9GL(&S`B^#`T~{N@Lgqb@2MQPykm8(*N!3x&}2xp;#oo;OBkX-M1H` zNHB0gwuknyNT#@WXIFeAd6JFtwzZdIedpH3L6VtdM;)^1DGbtuVyy z$@YZtH_*{{{YQVKKD7eqkikGKV*MAfkO^Tv-r{3$sgSHVFUQa#W6L(XsCHyIt2tM6IeiIH8LmRYF2zRlo{aNN=|Kv&l9?ew5jl|rMZz)ri-0^n7iS?o7$iVQQ9J3Fkb-AOxe&}wsMxB z&ZA|!j>nGuQdLC`_cYF(+vjbuLIs7Thjy6{YmpD--%lelTYevM$ED@_VXe5bE1~pFo%QC|J|GbP zm}OdWDiT6Nd`h zQ;5-uIQ5baraGyp!PoK7#Em^tW#tF@$PRGb<7Q1uzC!(TxQ*U?(pV?MV}fP7RIq!~ zCs7jgfx$bPXPqti6@-3Bw6`a;2N!r5K&*sn)f<-^&YBvuwYRI^EtS`-hkktv>aYUu2gh4?6|YEjXQ}Jb z*4DCZJRL;Uz}!tY8G@5Q9&d5S=H`RTcUivB(aZW?XRR#2%edwuaVYNsaIFl{UA<Qo*yx%i_1&2jMa_XKY}xa00YvowoF0R$ZX28l!HKN zwjl5c2j=*b7F7){{1rl}5cirWQM=(OUH5vx=dRF$AQWE@tLZk1G^)8VsO4Phe08->Mk+S6MM0oDm@@o z747x2>V~E$KE)nM_SDg3k`1U!ydk~l^4G`JtX`@;VLtiAGgu!GQ%>j|Fpnb-8~p-(+LjYj zR42$33u^Vj<+xnV!0KpWoUEZU|8}DMFynAop6lT4fh;2WE+Go@2p#fb^*f&1<6B|V zmLf+1xf=w~JLgSL^6%eY4G|&M0McXX3x!C}QbFQ;V$3=+I(ls3RW;ph6QZnm3&55g 
z=Nx@U+p`0BzJRss;vc*(L3$wt5(VEz>X9UU-HBkOfZL?X1j)3F zTWtZO`bO;z2PcZnN+}wo=(&zt!AjRXrfqrG;pIRPKw4^LSv22*5I%qm3OTiBPUW() zYX&wl@2BKbrxlmNrJl%q{$D$UfjbEWeSZ2o#vdOE@zfG}B=5~xL(`9$EI{yF3x4YV z9KSiNHZk) zv7(#>|5~;Uc@4GsRZ7OUAfXSP_-6jZ^GUi@fc_=YVd^~%q8U-<;Fw0R;ez9TlM;v(TUC^RM`Wv2WZbLcU zS04S3rAYppmG&9`q&6p@9XgIz^acsJB4#$I0zD<(I=D`ie_d zp^;D*u%cdDVB9WD5$^8j(WOss*g$*wo-N&w*NW^9q^5%=K+n~n^Q72B_{IP#bjGpE ztj*~NdZm6Bk{t^QA6TOOPbaW}InJ3@%+w1YVamTB>JLezv3}O&H(nZ&57I=e#at*u zu$2P3rXl`h{-~orIpo~#6;Yi=ns2zt{m?G~ya25ItvdKFZA;vxBZ*+0{nl2X`r zgpgM-SPV*T;Q>lyE6#1L0`Rld#eU|;k&siSURr+(@_1NwVP~3G*^%^IxxMv?^NZoL zZ{x22TO6=k9SPDESwOFAqAZ|6#YimB>u_@@-je?jp7hCSrKdlX&0H=SCc~`4k@Ar4*&*{;lf;Y5P9RT?s4h zk%mgnSE+@reVG~XFk+*OgiY$dPtp9+7hn$V?Yhd0wJ4}fQMVM-hL_jj4WN8O{R`Bg zV5#XkW2Xk-ku#v)5u}>k9gSlY-AUe+%>qr`##j^%H*H~+%VC|@R8X=_M4#x5@drt0 zOuciMUHKHdH)H+vYF<;-*n+ijL{CPd2+aTMX$}?b{-tPl87uy6c$(tHzR(W!wIXBSwM=DGzbZ;I})WE_AP1)^}VR1Bx2vdTd|RpoBQpM*Y)`+IQwcI{};sOTk-{?=)~jFeS(=f5Aek~MOuD!$0y<3X^;u||3`2%z~k zGZ2G*LKSq(BA&;T>V}2ZFXiM9VmDnWgegbwX8dJ0OP6%?OC7W*x^xNj4?H0Q0^oc! 
z0t&!c){4L;UrWOfy9SyZ@gB2NcYISJ9fga1SQ`o)lt9bo_VFXMbV=^~yCkS@N)PX~tv+E9NO9}@p7gD)<*!=Nk?sx_p%~&68{H+G%_#AMBs$X!E|=l@ zrYyMHj!k=hvls?>L`R<1VDbR?z3rID{Hglh4B>+hbVIvFz7ZuKlARr?85L~yy7FoatjsDtJ{xCh!wnsoFs7C3ot zPf&&j^)2ZPR@}V8Ql^@2fqUqf5UMP>T~PU=0#vSO_cCV9aOKJUF_Wp4wV4i6ny-a$ z%1Pt)u`FVd{^d8XWyT}(BUP*|D)U94P2gL`Os9JRh)FNx_7k+U<( z@ASu@;OXps#_0=TYiC2(E_ZdZKcc`5`b*0o9gU8z-kd6dijpTVkYXS!VpJGpY{6fx zHMt(>2}LMGhC@A{eBFOEbeSt&kVH;(EO#N_Qjuicfrxl~9Q3dPnoc=UCfA&Io3%1U z$$H0Mtg=Mb^Pj1;gph|@b;>H#rpU)#i6l&w^L60;MCXuB7E`d73pbwbSu96E zZgj&FkSdh+P^*;9~#Ce>T_WK1mtJ_Z|&Gtj2m{&P~$&~&agmb=2fZlY9r zWY(zuKDe@F0~)bzNJOD2RD3TZ9G5@JZChgB`M$!Sb<% z`8b3RuJb|0Fx2@#-Xw6I7^5695`%Pu3L}{;Ze_V_WZDPqO(%2w0ib(Y^9^Z6eE=@& z0n1g?=08$vgKN~MPT=NHB0KzMDpiAc3m4M^DsfFjn-2GURwXJzC6(lSeQ;5Ml=oEU zpc|5zu2}v!o0P%>?Sjh<5}P)$-v#v9|IL#``eGBD(u;k*Gx975k|Yn9aE(PS!<+yUhL?-iThf^J}mqLo2iY`G=>daeAy zc6kWANAu+;UtNW?OmQ{Vphf>onE|g6l)!CJI+cGf;2I0H1^bax4LBRJj7Tl8R;B3y z`kO9z!R!x+j;c9=1!*R5O9@Ah7&`lbS1ve+ocpYEp9B>QT)8IB>#d)(uVPz$cAxoi zN+v5E6;TOZB^g+L2{i^Z9d{Qcp1ORlTf?T8e!vwY{H5pnxI=H?#`joNaHU#C)mO4O zc@wm6EBM$R?AAEQcC@HUxU3q_c}Bcy-ef0S4H0VI;H`W~6Onzks#@}80l2s%F(SF? 
zT0E@jQMevT38oBGEI7Q(g8u~0L%t3mc^@-Sr8?tQ9ieor4S>){#id0_g~%{SC$}24 zh(_q@7p$V8E|sF*vN#BQQ}3dL@T{?LBr-axhV#R}84erQ7isVbu>8V0NT3x!arW3A z)=h-!S$_ZF&|R>6FDkt+dDVsn!a@QaF7?#dH5rt57s@Eh&TF>m(J2 z8C)nV+`9UrXmw4ndf@s=stL53R%>#w{fNlpDM3g5MM`Mr|EZ6C2-m1g?Z5_Rlby{Y zLyu1i5wZ;bqb;Erq@o48A(Mp%s5rApkfu{(vkAb%j%oqH?yMtyY9Q}RX3j%d5bFaf zN%^jlqOb^VoWb%CN%!tQZHZWDkT+yN)Rv#TRKUuD8|UWJ(K2RlS~D7tYw=Wz9TsL8 z`+yo4k6|}m289Jr140g-Md^<_iYiMwX{=&|QHH!H03=dMk-m^-b!Y1F9Wck?Pl6zw zroJ$)#crqtIno;fR3=v}UTT6KMNBTew!>zA@qSbYj-USYyTM!#BUVb~UI!FyR@TG+ z*@cFgT?x@==mO_=j|IW$&ghFjq)_i2BWi}S<^kIjCQ(V+{YPI45UQ4;Z$JU++&~$J zSIz6Lv6phfYF}`rF+R zqcj9)xyvA;REWy=%@iWtAtl96_bXtGGEu2X_n|bj+NToK6FW6v5}=oTZzcS7q|qY@-{md}SrP7Um0_TENc)|*t#z8_7y zzmmoRKK^|waq=V|dzv~8#ndZqq_XCVh2|=lACE^v^+D=CB!NX_o-j6jI2>Tkz#}1v zZ?0T|R+NO5z#d*Lv7}AVxqiSJVAr_}=Ik_Xjx@>XLyQW#DByAj_#)sHwV4uLf4xRQ z*e}O1Hc0K%`=wgLZvM5j3p$DyGw;_ZU`#GpovqTK4pH1od`?Y{QIyINl8U;HxJ=wp z^U%lrbJn5i1zcnoH)9U*vI)LoQ4IR;KV3juRT=yFWcb9D4OFg2M%QC5#TWgTP2jHt zTK+!{2w}?bIPDKr;~I^rOGHN1U2e9>yTOHyB6d|g#v+6Kp-u*0n;F_&dbnXiYLj1w zo!$*yv}n_MNkek)F`lmTOj`8K;fF*^_e)95~n=4XwsNBVh0e#QG^J0lNJJ0LU#5 z<4e)-J2nRn4tPPJ(OP*Hy!e6yjXHq3w>(D;ft|N?Q@`B9D?qq0vCqGc^oN!dhhJ~* zLy2Yuw}DW}(6zF5JlbP%au^H+y(jRdwf|P+4aRyL13v1-dE9Cs6n~lIS(%jqM3o{c z^KN5u=sl0&Hr#fn`8nPQ^2`Y zpX$8%%wX*opk!I0o>f+HD&|MC>x@D`E{eaY!n{YWqC}sQU!9|VECo;jx0HVL8*o#@9@4qNc25#7kDCQ+&2|33*EJ>A)+ zesKrGzin!GeRKG`TV8)ucn=}(+Dlmcbo=m;q`g0y{8YQ+Sjn#Q-~IH{SBZDef3YLH z|Bo|Ac2?a;{%+R~KkRM)`>P`d{yhA}F?jgF3tqG_R)Hr?FKR3 zVWWb^*xaS?wOO0BPdj^v!2XcHCGt|_!{WH6dav;I?@~(FN%g#Uo4ywy8vGCrG_*BX zaBXs;gnf*5CQ;v%WGxV~{qjJ?bMxczHDJ==wr>unrd1Pi2~8S$UU$g^U9oVc@J$1z8>Bo6!r z=DWLBx%RQOQGR9Ko6iA~s_z2|J5jLPQ|5a+2EA!&wz)?c8n3xDo}FAd*Pw1Q?gEoj zvp^iDj{EQJpwnpgXcA^gdl&DrZ?iQL^|vnVK?OuUJX+RXv5LYc<41M%yef7Pmh5>K z>!h}hJ#k&M>|6Za6D|8_<#zD~dR{vX$});&we_}nFf6!70mHRDIAWhnV(Z{14spwU zB1+Z^qikw=#{Z=<|Liez2}nf|N6;)WP_nfytp%J5L<&mSw`e_kx2yUYa8p{HI%=%v^*f_<`Le*6uuFot^`M|QdjlNFP}@GG%<8Dj$)5*o%^ZB~QslO; z4O2b_dbTyCW4uS^SA%gsFF(*FIenYwhJ#8yhVq^pqh*>1$ 
zO)_aRVuwDzA^o1iZf9GYCRsjib8s(>s~+*(ZhJ8G9k=Ut*g4~U?c0$>~;2{)4hlUtFk6X z!$2`-sMvEjWLHzJ@#VOzxhk+S_Z2&ckYaBvP7sd21ei&{I-a!=HqMcWQO)D2h^1RJ z6M}Jh96>Ms^1dx6QU5Sii;~W6R&mtS($d@E;X4!-4$4l?_tn)5I3zig6LR$Hyk8^j ztRKs|k}H?VY-f{OTb6(g9Wz~Q)%Z(7pN7ucW&f?i4;~t|wmk${8&|_RZ{49hA#av3 zmBf%doVhu*5v?<~*OZYU^C699?ckd%c~KDv%_)`~C4A<&br+aNI!FoT9+#goW-TW7 z9_wgdr+wL#LXvUjXlZXQe2AtF?FS3XJ-^PT(yH-y5}i|ViFh5`Y1=m2=;W@gaT8o! zZ2{uYY%Itq>pm(|)yb$_o?y??_TXb%xA_Gaj0p7H!&kRjc#NoXNAluz?m2RkUU?J( zlUb>|_1Mkk+C&r`DOqs^oG07QQ!vLqPa{eYmCYpV%8R?plglL!J8k1j$!Q&(5uf&b z2fBpA z!7A#(aO^c}yCs5^45N}!S}Z%dx+D~`9SF?p2estI9B{zP5CT zKOb8f6@nSx?lWPIhQK}>Mpg)N1fx@WUe`|N>s8kO6=g9$2C|vWPcyP4eR&;cL!2^f zHZWFc{-T;sSmhgz%gm=nI5_}y?ymeP)&R26-IL= zm=GvO8C6cJj)Lp#XJGvaJUK~>`2Wy&yIWb}kV8swT(Mq!;7fdE^!(UTG(O0VS0;OY z?N$5cvX^Mbho1q1nPj!lVhK3B^loB_;)-ob`FeeQew4Ey7hfZvx9waW{{u8XG|W}- zmThdCE6u3-mg8<%uzXg~#qR2os^@4_Y{~UO2JKvn{m0=smigdwi<67XMD#GrqI~8P z(bcpDynE^5kt5r{RWqe>`dz|!M(>B+mOyV5t?s$T2owPu??n3vIy%046Y1DslP* zMgDPxCGZVfCMC)T^9!b-QrEV@PRivpWpH(TG$~Uqd}W2r4)NQvO0&Q3Pf`golR95i zybdIc+A@pu3P$v_lXxLX>>1hyUbWQ7jhrw1U`WAjnF)%R2lq~aTcMLc|E%qDk1(Ef z(ew2FjzS80fUd9E39{I3yBfC58b1j1*1o=v&_YnM9Xrf;rlMb=hu0(NH8{CvI~f-1 zc{zTwZBmRB5Tn1k1G%W)LGU;GWu-2|JfbYeg`B5X4!l4F*sG1drGN|;N%(p1IE+A@ zVhd=71RK0`>r}*%Q+Wh>^a&$Zu7XHh|YNjOSszPB4cNm9~CRWnFM_NxwJIyw>JjS zZ%DKFK7yL9uX0v3UPXTMH7C&-9FG7e*RMoaaiBPn@%2luzQq>C;w99wctfvO@6sbo znN9txujt=2G;ZJ68<=-jWxM7WXG)wmR&aNiErRZHpG@LM`5_nI1d_5 zZcCmLs2et7k&Dw1~d*#4y4h{4KnIR@3kp+$7%o zc<5$I%Tzd;f^yTa7L8{c?*ah0RZUO(cZx5i)Ut9%x@R5i4rv>|!-kB>T*)j|b*{@U zEpz5y*t`soHWedym+%5Wg5X(~fz=yy1npLaTo^s#^8SveaeJmKc=JvBx;Erid@wW5 z)(H3$htTEqy%Q&b7FGw?bc_Alc-p)ORMj!df{=E~cxv%?_8@`2SLHW_p@#a7XT$rQ zGud){;{*IK?41(btL0CqDk{sSTePBD0oTJBHFmFI6v%gw6No7(Ze5H?q8eSwSA$;c*<`${~$3J2irvyNt7; zjwkwdwKJyq-By*EVg}Dl5?8O^Y9a;vVxQ8N%1$g=91Up6FvO0nKOT)ElB1%_ONV1Q zQ^m21qw5}HqtU2`BWn%0zt%@5Oe^Sv6ih*w#pS- zFjKHRj*`|MxPFHo8$0`e&J3pS526RRrmu`n+wkH$x>wJ^Qd|3h;eGS=Pd1UzV6TQ( z{dOC+Y(5#bP^gwW|71RAwb3D2a>w|Jv!-4`_QV2RMq6)3U9llv$-d2Q>BzP$shJ-# 
z!byojt#&s|%vs1@GBqT~ZC%`cJkRP=c%X z@}Enqf(f<qOb=`!-1#FwIFcjah4#TbAR_EQs>JTo4HEo!?58@@rsBpHn{YjYqBw zI4m8_-dXW2m^iLKh3~ObE91JYc?re{YALt3mj1^FhNHS_%(6D_Pg7#UC7LCNPV0G8 z0-jVD7CBH{Wt^A|QRf9W+1I!488{}GE;VK`%#0`UfkNGS5(#ny5Lw0tKKcum**Dl& zfRQ8Y)5L5#_ONM6Zhhwct+9#jqgC0Yj}*-ol>*yP)OY50Tk|!L_mUU1mwJ zl3zhw9!hVzNG3=92H@{3egaP+Xz!id${$C}R2gOc>9nz6NMvY^H$|+mox-QHF|>Gm zT-|aCjR1DfY%ADmZzYn>syT)qv*b5ssd@UaPtIfR$4q<*h(WS09Cw-XYgU4r{u33O zzB~>N4zSj;$Q7*5<}T5Dw}{&V{-j-wt1GIJOzEEb zwOcuBO}!ehj(b4s>~EoHq&?H|WAB8;?7LG*%(y4@E6+$~m^6Q)tJBu(OIat3CZOfy znH=;3p)!XLktG|JF0RN#K6fx}gDL9-m`YTHWyLkLm?{C?jJDlq0??FMEes)=;2Z3t zrA}eqknp$n2) zm_GNhhIC?&9xZX+8uZw6aVk>4iw>nPxOi;WmcOwB;_`*KcCi*_Z(dNbGj8zad9i=Ofbwi<9BsBkH2@E(X!>bvJ}Jl|aaQ z=mq4k_3`+!Br{u>#`dEz-ld+&XyWGf)NL$J%WSLyH;V(ocz4XfH zdn~`)Sc{;O&;2&*Sb|H(_1y;D;q$K8ZyLi^DwWD%`-EbOPJZ? z{_*8t(CM{KkrB+9i&LQrz({98o2|PvAeF9-)>~~XGT@1f0IMMGEIUL-aMAGZdjIf_ z-%zA0i{oLk>-omtGIvxE06f}CYQmS5Ko?5b2wE-BxKpuAzas1M`1KDxB8YNA?1U;X zA(8eWPajzEH@v8^c=CevSnP8k+5#Ujs-W1zv*XFgRPHKNGg{9-P$um3lK|<3qgZO6 z0nVorKxx_tnDA6>VB{;Eob_j)V2If9J4G(y9H2HFP|FM!5~U1Qj-^D=-$^DE{3h-& z{US}ko7sWbq5OKGHQsRL)qL*yvoulA|BpES{|KR-459?^zgSQ44p@dXQm#tXWRWpq2 zbD%U~!}m0Ii=!a*Dp)@gD8#gs)x{^J4(@<}oQ2tVZJ_(U;*`1LIUw1BU}h^V@(u!b z_jqEM+)9Z9aEn8tBGuLtHJ=0m_9@J?=CH~^L?UN8alxi z6ui}85zQ0~zql=CrQB#0muHf2v+C0S;k71bBJdR1PkM2B@f!-|&Fy$H2RO&y0+0Mt zL)v;Uk0Zc2YC5br2?~I!)}ST|TY94TVPi&sax(y?w~kNn_Giv@YSGTqyn&kw=y*{r zkvqRJL|PhenAM|9l>g6lbcjPC_EB<6V8l^>`Y^2}yvWy&t*);c(2yK@^D^w1az#RO z=r_OKw>abB{X=NY-gcRn-+#ZG@y$iN;JWhL&!zs}sqxv@s!4ZSpTDL^Q1W5I&tf}dR#%`X_o{$1uFO3R&V`@KnIs1 zX*8MG%f^vxl5cta^tuih9jGkMe;6g74GqHlYD2CG@Kw`e{XxL%!?aVyd9eefXkR_5 z8zR+~cuMfBYBK1`N115p&|Rj#iyJLpiZ03&n4oP$G&&f6>@#*-7*vXTqW9T&jW{=> z(Z|L%WwKtelZ~Fl$E~C-y>knGkwZHMD+j6QHX?67vL}YwK=XM0kP)ihO#<55#(0K+wbcT0Sg3;@$F|P*%Rl_Lb13` zz{q|#A%#Ms+sGNmyJ5a)wh*V+U%p~vxt5{k7&Ez%p19?aywtJ9f^32bEDp4tw=HjZ z1F*&ok|yYXJ1ZEum7>4)cd>+QR4nE{KZ)}%id}CI-16kh{!F%3gAsPTa?maU1{bG> ze?O_uE{2Ce1~$ys{Ub#=k$ri*^oa8qi3Bn7f2a<$1u-B+LOK8<?L5VpoIk 
zCJ9urVdET7Tkw+)1Rzm2?Ao;C74Aq6*GXolrK&i!edNUK2G@}(*(+y^B=Hp6h;LMcDseY|1bz_6DJO8SOVJw7t_(u=QI^0xgt;~ExSIy z6kAxo>!E=-raS|->5Z6zU%beCeJ&a^trX-O{_dt+spO8`{9iC)?KuJPQ3BU#@tT14 zd%QL;bRW%&{s3BMk~@|cWB!jRa0K9vA@#^7qYmpnBj@}f#eJ+;@3V)X<-Sb?64LZe zP)lk1kpiCq4*&UnCHLB?-nQB@^p4hCG29v7018ZoODN6&h19AJrJ+;0^q1?|k{+vK z)DaeELZGLcS4BewfJJMePyfz7$cpzPb-w~FDWsPHddYll(A;AG z#Y3Q!vGcZ?7|}L&_+#|ltwUWtsk>VYt+IpiUZm~`A3)oA+wxb=l%l7r;10GCrv_$g z4#`C?pRqAs{V+?gN%#P6K5QSaVx}ZC1rBf4zI+(dd$rBYfw|+hoVbG!z(M*Di&p$! zfnE1m8?6a|3qq@5JN=pu>k_#c7MF6VpnK+6b2d=lI95Um-5Vi-x1b4Q~ae_a{7B@uHhZ zp6vKQ{Lzw!tdr!UK(C3O&(T+9069;$p=F>RfJB%ajSI@5lc|H**&)oxTY$2`_QI%g`Q0#8BJY=dcIqnI2A#}<64AX5$QSeR+#}-c!3w!MP8`>fcp3+M zqHUF(+!{q*eCui5a2Y>8F;;4|<9enToAwAhtxI(UE}+zz>@#>qLN>w`BcaX^$8YeX zD)e;3Nge9%AlenC8PrVtZn)0=oh|wBI|6=JhByEico2B^Gtd(J;(3t{(1;ZMXe!k= zwnFbS#pjm)hY#y2q;XbHQ6)}B$s;+_Wlh4rymc7Mi$HBYisdH(@KXw5RSI0ZXz|mNR!oLM9#w=oLFM)0IGV@?ol`$YI}32E^62Uq4_n>5zw_)VsZ1!e_Lm zZ@*y$J2&|e?p_LTfX^hJc~B;}b01B{vTUCOUHdSKes@2y3%uX9&6%G2o5pFp>y-=N zLvYS;nt%5!`(WJoV{6+8_4RKhYsp8WJu|P$ZNC%n1JHc%JsnNz{3md@J~22ZT+ltl zvypjrh%%k9^X~*~KB%&t70Vs9lZp%>T|BVoW%E&gxIp)(~jUnw(aWXS` zMB^+#ofdo#QINQmI}-9#R>Jih^XEH&ceiWr7>hIgSC=vRJxYK9uSK*FnqoKR6y$VQ zD%f9Xt0khup|6BvfM*94ffq%Ilxn(cxxplrd1ZDEOp!@MHG*qm+c?(z$`7LteqiiN zkv^#j-w#ih*7H{MZIfOxt|y{Q=~ooQY44+cHdYlG>Wwk`-<;s1C?~jr*-bzEhd4*` zQ04(E2S6*gD~ludf%)pG9m;NIHG4Xm(Q>}p?a+-hNGg=kA zK@t9nX;W7pYJDsYOWnQ?GRN)Bw<>E)d#X>xoe*(==ddpD)jvZd zgb&{NTmJ~Py0;&5$KnR*coXfkPbbt6>x<-zLA-guV*L|u4w%KyK{P4DMil1dUwdmc zz-!FgxB!zU&^TGStYr|22TI+lXFcbWNoDnU#}iQ%pn1EE*&h8eIjkn2Ifc1b_)-Vw zFzzS}*_~)~EiHRL0Kg2f2s7)J?KWIvkkjBlbw;|8~XGWsNg; zK?WVSn7Vo{5v6`uoY6P}+gwu6EfK*F1v#VHM!PZLQ(jPVX3*&&pC_odtMlxH5OGNK zuryG(diWuLL_cA!35nd`uI43;q1K?4?EhL)+4wC zJUy3{`wj6_Zyc3`8HktgxN-aSt}Rz*z}&xz)f9EfdcVO|0Fu+&`4K5jo*@xdkTYI> z_j@XQqZAC%5+{$kSq0r6sJm##Vn=?Yb|KbLm@mYu)7O>?!-l}4-Zes{v62s2{{?Pv zZ==HFg0dBV6y67xTV6t-&8zG%Ux@8SG{c2HP#(DN{=N)8ZEMz?FU}q2U;Qt4 zTZvO*_YNm2>Josk;&H`|1i8-4KJ_5B2ug05r 
zfKRP#EC0bhBHa<8zhIujrr<#HcD90V8f;rEVGJ7r5r9;KO(8N~-or)QLRTWq;3T5n z|EufD)A zV5&(D2%^`hXWEW4Kb$(oB=MU{*ab3EDqcLu=VaLp#;LINcJ#(*%+1XX+mO1fpxvMZJwK?{BS9>y zc31qFsCM`NN-aM26H{Ce<$)?zs49_Ttok&7tC|%74(EiefsRA)W-D}_=8RgB%v=z{ zG7`rjD8DpW*BF;AY*okH$wY|gIMg%Cu99z}oZY}Pr}u!)lh7iSUVuTH(0-8^uvl*= ze3<+3^6D(HtUP?vO9C@mQFyMZZVvcUfZEsqaH2FuvF^I?w);g8x~A&q#gVeasQ>yw zdZH)+obLQNASJJC8K+BAr$4jGJKjG}Ejp(&KVqjxx;xNs83J+R(f@Q1GTp6^>4Mgp zg0)(*cTZt{`eWy(-cjiL*A3)}494v5F%zbFn~>hH#2$g0SAYOaFjao+eNQ-K0kBtC z(0rO@3+Fl|49DHqODdifrBbUaPgF$cS%p@`VEyEj7oU1V;CX#KVUqK6@oMQ8hyM30 zfb7#nn8Bq(=d!xg;wdT(Hf3{2n%nYbE3P%Oe#Z4XF29rw_(fU0%1Z$ogJxXzs2bdW@UME3sp!< z&x3XPqDA?vA}ICEu-(F{w)=KLP|bJU7b&@ z*|F+lkHzytn(uukx}_`Q32)w-G_dUe!QUuI$S{!KRsI>_@Q|gf051;=Q~FRBs0J%4}MIzp4olYKPAh{+BU@KyIe#|b)mZ5?NARpvy zp==Y(6EzdhEkmm^u}-O`{YumI8mwHP-BLCFO~!0{EN$ez^R^|msSIiK^m1wJ$N{bM zb*BxpODAKUdEQ};6xw>Ab*9HU+mrJs#xo<~_M9I$^U=+)s!@NqiJ28c<%<(Uv3hF} zL&_dS%-1#!8ciCOAlOZ`5b=BHvn&5iKpitTyo+DPEIU?S6MT3e-pNH?ex;(~QLcib z{!g0kM~|GS)l^x{asS5X6-!w$Y}F5j2_|o6MXqAgcFJDn6GoQTeGAs#exPRX^c=@2 z#xLyvUB)P3aDLO}1NlB0A%Pq3uF4ANE1x;sHqt%RRnhE!w7q@#ObhcPbll+ychZ}x z7VZ&VIT2hp%tcCGW)KM!FYP8{t&Py#xD=Cmxa+w|Zs7;g-Fz@wEQ^IqBmFrq%Zwe& zF*Y#a31ghw#j+(B+2|Kf8j#jRs}=oGqnOyFs*o#_T8cUOv4&(dU8=WYY`{zsJrd7% zdEk>VXB^GP5+VR3|f~^468Fl`503 z8HNF^bi9~QC;U>N;0`2(R5CNa4>K%xMm$Zx9csBUZauYdM=5}%FxwfDysXTKlt9dCZ-0lT4HO9>{;X}$qBu|l z>x$BtwGl)wDTI2m@f8HBjTL=+0Jau(Je-^%a*d%MYHTo1m9Goi3bM~oBS+qe<-zCI zGQc#;34R9SS9R@*`wDgp&EAOc|&5 zm`PI7*RtxVLg!1?y0i#KRK`)ZMU{UQkn*w^{7XFF@H7%Hv@br46Tv^yAO5Hz=pu~v zglaM{KsD3)SV201_C28+3alZlpzyfmikjB+>3R6}W)UUG5SX#R$OEcJ4{ zDohX+z~-j%!l}LF{R|a!pNba8s*Dl^Q^iayTo%VT`4Zdg=f)2=6UEjb^o(dok8t$G z2ZS4mHp00LgRl3Ug@tW3BrfI{uf{F>O4TMoluQXGiDIZcTYeiVxOPODwb9N9%>NXk<`F(+`oLs=Plvj2c)WphfF8^%_;gA;L>bga6 zh9#3J^gUgJRPWFl^3oxW7phn*qxWgNTJ@`kVp+Qtt*le4LqTN`?WtC zZiAs{9Xqf?>w2W5ZbP+e;QI+jIZ}n_!&*Fz*>0e#4gr0-lMKvkY1g#`*D_ST7OS>K zixirapJ2YE{|B9AZVHLb*^p)GkUP$5Qhu-%%U3M@GzIsSf+8+1WqGplQW&TBC>`#C 
zUQ{>vCJpG&kCl@X;pmB(E#;Y)dh1?FMGLk$N6NbfBlFuGj|e5IQ}PeX-jZG|#HL?$ zeY3V88MezeXi!i=IY+%gC}DfYz3wK1+N=sFtQ+&F?pr?NR4Y1vCwB008NLTa=V?R& z?|p_r+*ZuE@;VO2Y%2huSLp(-@neD4QmmSO8{)pdr3;85mAlrD(U z#AoH;nvKsy^4>nHx z(xnRgtmS>8p)scQ6L(e5UBYJz4;0b}TI-Dy(ZfE}3N?j9;6a!vEfZ(d42 zM6^ajLu9))3fazN6Sh6Y52kskjFafbgJwpKIol3o-~y(f^C!=g9~zrV#0pXF?&Ibp z6MF8z5jCWu*6H@1x>u!o@JgMe#+Td!$WGp5GH4Q`#_xQou7Yi1(Jpx&CO3X$^<%!| z3SPK%=-a25VNw=OuMxavU8RVGMU~2*5nJm?@du=Q7Hl?0DiUkb(mV!Zh-At`=kd1W zGfpaDjX|2n`gl<^5p44*g&=I~>9*%?jc(P8wnHqn}_G1@{xk&AGWo5V^$+HX9bGW#YLxcU|iR(IAy_$B9P8e<}<&d?q*7Zz%&tr?}!^ zw`|+PCS^Nxk+D+L`l((6)T{t^hP{-|kktn)LDSA!>eGJ>*@3%ySC)UVYr zAIuV8Em9~l{qx&Xx@aRsrsa1YvUrIl)J?I_Ar z85kF_Is-KP(!L2*#$cySpKe=t(@f9#@j+($3Z>Ej8E^UZn4fhx!K9Tocf=3@N)u(3 z0k`JnR<08KEwt2f$0A>UFs;+0l+m%=V&=&<<`A#=?pa%P46l@-afKrZMS~?K?d3gg z%wurVDklO1f{`uYJ}*$&HWa4az16ca_(HX!A$F3M#z^0!qtghodp7Oo?DCqPEOq%N zOb*RA>*K@T6}bg$>+JE$@okvftsWnDBlh_HqR%7(DL=cgt2~}ft1HdG>siwoy$5Qu z!v?J8y+>D{M6DHbJRe)_XIB@RBuzXh{iQbh17@m5f3U0#9gHwO+}mJxr17_rhVjr< zXqIG^aQa=5UH16sJaa|cf+P1Y1V!tjg3af9XB87R$Nm=g&2tERr*XL@A#NyATOFOi zuB9;1VZYX+4ndRr-1049%Ef5Tq~B{PbJHx~)?IJUXA?J_X z+B;8pHM?y3FYA%JHT^mLjxN$8g~;%ivy-^>Ij*6CHYXt zP5uWCsAzTQ^l@7f{ktoe6L}7i%EeKe7}m(Kudh~I0RZTOzxhlx+^N~}q2WNS$Fm(| zUz6O_d24u~R^m4$-!!Y(Ep(>;OmF4nlLO4LHZQp9M&UoqvJV;+_HB*14=eN~{zPLO zd_rkDUOQpYUzZ$g&6!tM(D@%pOc$`?=wDx`O*4Y35 z8a|Dtzq-(^%)@%3{MR}qX8!|T(8ds2fb&foQB@{Dd_B zO!RRFRmqvEWcE>B-aekXY0YNmJMGp!Z=Z5H*Go=SUIE;^UK+UOeP_#s%|*TsyWyCW zdkH5|@}bg&IcJK^Y<-_*1T#gy5BPcbmxqa$t+d;eyF@~moBEitu!#fv&CI@vmsul( zmPkib5|}O_Nt6B~=c}7$wiPdzd)kllG{zI<*(-j{kvwn5{pkx~MN3AVbp_{TRA``T zh%hAPG;(HRhnM>U?(>m%qP4|II$rI86?@vM`}zAM2kyG?uE?l(aD26W8*prC$?rz& za!IKN-_3!OniB*Jx17$f6^zXee5en zUmFE#fd+7Go8*JKo;(pbb8gk@PCiy*9-dw-GwRQor-$*hW4!{qw>@J;4kNwTvAR1` z5336GH61CNDRU`1TRgcDUr{+%w9>KVcfI(%z_R zlc>o~8jaXwU)d75<&cRq{~Xq;*Z&U24R}dbBy<%qWX;f$fXX;}03eE88 z;81h>jrUhHrnBYQ33!F{{m(pJ)0ZlTnm!DayZ+bby0GJew2%np;_0n^_p`6nJr8d8 zvhq3h8wP9$31ZokZzs(A>Q+={mR#yXRV@Hj4+-*Tk(LtmYkoYO@2 
zz>LR@a;(h`UY#2}aQN=BwZfm;9>V7`CB0Z)Aw)Qy-~Bjpy!kH^4dGemizQdgFn#q; z_rbSt|IW)dFI5W>x`bnvGradHB>Z0OToL4vdmwp&53s?7ipaRSFz(1@i~S>4w_4FB z3&jySSX5LQuLr+7r^FwOepeoQ;%dnO^tgzlGuWizqKcIlb5!h;n0VtxX{N0qb}ENv zcplJQ6DYXhU^jg=UxQlR)@Ps!*Q{ZBO?gaYjfFjj1y~M?itw^@fk1pJDxdxM;BpQB zdvd`F2A+Xvqj$ADpeC1GI<|3WxP2s{iGB$?R!z%x0@Om?OWuUPm>l!j;}gCJ8@J#u zP~y#p$wOO@bm+Z+JLkj1F}txejmGe7RZtH)KEsQgah2w8Rl~4wP5h*VcjHp8N_N8` zC2ioMNVNXScBZB`)n!y4AKcNYl-4hQ9DU+iAQjsWFIQWvfKF9(iX@n>^Tc@nC_goZO>b7n=f}6`a-lQ0`qH*q}QyG9lHV z?)zV3-%MY)V5mJS;9cv2>e@Qw)FAhv<==67sYm^lk~gpCd)Gud{cYr_hF}%GMy2@J z^U4|aj5z)^)7wYnjGSnyGS+6BwCD|0SJaRCGJTBJp-uPFC{zFcYACHLW&rkhR%V&G1J7c;(w5?{PI8<&0Dp*w+E_~!s zdStlcJioP_?IX<#L)v5eZ`ZE18xTH9x*-TN;p>ImK@3Kl2BErVi6i^l3Xb_IA7k8y zFYr`jf;p7ndy@vc7uPi_muz>PLII$yy7T7Mt+DiL$z}5Uwz0L5o-uc3_eYG1l9+{? zkG3=?d;3@lun|40sQC}^0<6q98eRQqe_=`rPes|D57P@cdjIv}d5p}vLA7l^&0UP7 zLO16cdb(Xr@_!ZE>hofY;`Y7uZ9nN?f+>rNE8jgUeq5R~^L>H4$OUl>T5;MZpvHR2 z4;&t-9ALXX#Lclto`0ouqEtgBtHPseSARqv<{J9nf-{zD4-E8%M~-;=M5Ffq!ci1+ zb)t4ou=|#xQLFh*x<2=JVNOId&>FgzGd(^Q%lvYo67D|SzvR8uH`L#~oGi+u4kM0)bM!$HEQb9^=(@7F{`fn4R8_!6wefn zJ&9`Z_L07YLB8u3QH)F5f`-ZHQYTp~+Gt;zA`5lOj-BEB#c6SvgG4{5&2B^gG~3X# z?;wWmQZ4pjbxaAa6gHuW>g#P6&{G|r_%QwGa qMtrhg?m)j(4hru=N0F7%4DFp_#0~c)U;jd_Y^ + + + + + + + + + + + + + + + + + + + + + + diff --git a/web/src/app/public/slack_monochrome_black.svg b/web/src/app/public/slack_monochrome_black.svg new file mode 100644 index 0000000000..2e1378f8c5 --- /dev/null +++ b/web/src/app/public/slack_monochrome_black.svg @@ -0,0 +1,27 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/web/src/app/reducers/alerts.js b/web/src/app/reducers/alerts.js new file mode 100644 index 0000000000..1081572f5f --- /dev/null +++ b/web/src/app/reducers/alerts.js @@ -0,0 +1,32 @@ +import { SET_ALERTS_CHECKED, SET_ALERTS_ACTION_COMPLETE } from '../actions' + +const initialState = () => { + return { + actionComplete: false, + checkedAlerts: [], + } +} + +/* + * Updates state depending on what action type given + * + * 
Returns the immutable final state afterwards (reduce) + */ +export default function alertsReducer(state = initialState(), action) { + switch (action.type) { + case SET_ALERTS_CHECKED: { + return { + ...state, + checkedAlerts: action.payload, + } + } + case SET_ALERTS_ACTION_COMPLETE: { + return { + ...state, + actionComplete: action.payload, + } + } + } + + return state +} diff --git a/web/src/app/reducers/auth.js b/web/src/app/reducers/auth.js new file mode 100644 index 0000000000..4948240d1e --- /dev/null +++ b/web/src/app/reducers/auth.js @@ -0,0 +1,14 @@ +import { AUTH_LOGOUT } from '../actions/auth' + +const initialState = () => ({ + valid: true, +}) + +export default function authReducer(state = initialState(), action) { + switch (action.type) { + case AUTH_LOGOUT: + return { ...state, valid: false } + } + + return state +} diff --git a/web/src/app/reducers/index.js b/web/src/app/reducers/index.js new file mode 100644 index 0000000000..d839498f0c --- /dev/null +++ b/web/src/app/reducers/index.js @@ -0,0 +1,14 @@ +import { combineReducers } from 'redux' +import alerts from './alerts' +import main from './main' +import auth from './auth' +import { connectRouter } from 'connected-react-router' + +export default history => + combineReducers({ + router: connectRouter(history), + + auth, // auth status + alerts, // reducer for filters on alerts list + main, // reducer for new user setup flag + }) diff --git a/web/src/app/reducers/main.js b/web/src/app/reducers/main.js new file mode 100644 index 0000000000..b831d34044 --- /dev/null +++ b/web/src/app/reducers/main.js @@ -0,0 +1,23 @@ +import { SET_SHOW_NEW_USER_FORM } from '../actions' +import { getParameterByName } from '../util/query_param' + +const initialState = { + isFirstLogin: getParameterByName('isFirstLogin') === '1', +} + +/* + * Updates state depending on what action type given + * + * Returns the immutable final state afterwards (reduce) + */ +export default function mainReducer(state = initialState, 
action = {}) { + switch (action.type) { + case SET_SHOW_NEW_USER_FORM: + return { + ...state, + isFirstLogin: action.payload, + } + default: + return state + } +} diff --git a/web/src/app/reduxStore.js b/web/src/app/reduxStore.js new file mode 100644 index 0000000000..e56a84fc1d --- /dev/null +++ b/web/src/app/reduxStore.js @@ -0,0 +1,16 @@ +import thunk from 'redux-thunk' +import createRootReducer from './reducers' +import { composeWithDevTools } from 'redux-devtools-extension' +import { applyMiddleware, createStore } from 'redux' +import { routerMiddleware } from 'connected-react-router' +import history from './history' + +export default createStore( + createRootReducer(history), + composeWithDevTools({})( + applyMiddleware( + thunk, + routerMiddleware(history), // for dispatching history actions + ), + ), +) diff --git a/web/src/app/rhl.js b/web/src/app/rhl.js new file mode 100644 index 0000000000..a835a6c947 --- /dev/null +++ b/web/src/app/rhl.js @@ -0,0 +1,4 @@ +import { setConfig, hot } from 'react-hot-loader' + +setConfig({ logLevel: 'error' }) +global.hot = hot diff --git a/web/src/app/rotations/RotationAddUserDialog.js b/web/src/app/rotations/RotationAddUserDialog.js new file mode 100644 index 0000000000..887ae03d63 --- /dev/null +++ b/web/src/app/rotations/RotationAddUserDialog.js @@ -0,0 +1,83 @@ +import React from 'react' +import { PropTypes as p } from 'prop-types' +import { graphql2Client } from '../apollo' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { fieldErrors, nonFieldErrors } from '../util/errutil' +import UserForm from './UserForm' +import FormDialog from '../dialogs/FormDialog' + +const mutation = gql` + mutation($input: UpdateRotationInput!) 
{ + updateRotation(input: $input) + } +` + +export default class RotationAddUserDialog extends React.Component { + static propTypes = { + rotationID: p.string.isRequired, + userIDs: p.array.isRequired, + onClose: p.func.isRequired, + } + + state = { + value: null, + errors: [], + } + + render() { + const defaultValue = { + users: [], + } + + return ( + ['rotationUsers']} + > + {(commit, status) => this.renderDialog(defaultValue, commit, status)} + + ) + } + + renderDialog(defaultValue, commit, status) { + const { value } = this.state + const { loading, error } = status + const fieldErrs = fieldErrors(error) + + // append to users array from selected users + let users = [] + const userIDs = (value && value.users) || defaultValue.users + + this.props.userIDs.forEach(u => users.push(u)) + userIDs.forEach(u => users.push(u)) + return ( + { + return commit({ + variables: { + input: { + id: this.props.rotationID, + userIDs: users, + }, + }, + }).then(() => this.props.onClose()) + }} + form={ + this.setState({ value })} + /> + } + /> + ) + } +} diff --git a/web/src/app/rotations/RotationCreateDialog.js b/web/src/app/rotations/RotationCreateDialog.js new file mode 100644 index 0000000000..4d4f8743ff --- /dev/null +++ b/web/src/app/rotations/RotationCreateDialog.js @@ -0,0 +1,82 @@ +import React from 'react' +import { Redirect } from 'react-router' +import { graphql2Client } from '../apollo' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { nonFieldErrors, fieldErrors } from '../util/errutil' +import FormDialog from '../dialogs/FormDialog' +import RotationForm from './RotationForm' +import { DateTime } from 'luxon' + +const mutation = gql` + mutation($input: CreateRotationInput!) 
{ + createRotation(input: $input) { + id + name + description + start + timeZone + type + shiftLength + } + } +` + +export default class RotationCreateDialog extends React.PureComponent { + state = { + value: { + name: '', + description: '', + timeZone: Intl.DateTimeFormat().resolvedOptions().timeZone, + type: 'daily', + start: DateTime.local() + .startOf('hour') + .toISO(), + shiftLength: 1, + }, + } + + render() { + return ( + + {(commit, status) => this.renderDialog(commit, status)} + + ) + } + + renderDialog(commit, status) { + const { loading } = status + if (status.data && status.data.createRotation) { + return ( + + ) + } + + return ( + { + return commit({ + variables: { + input: { + timeZone: this.state.value.timeZone, + ...this.state.value, + }, + }, + }) + }} + form={ + this.setState({ value })} + /> + } + /> + ) + } +} diff --git a/web/src/app/rotations/RotationDeleteDialog.js b/web/src/app/rotations/RotationDeleteDialog.js new file mode 100644 index 0000000000..e5b45c388b --- /dev/null +++ b/web/src/app/rotations/RotationDeleteDialog.js @@ -0,0 +1,83 @@ +import React from 'react' +import p from 'prop-types' +import { graphql2Client } from '../apollo' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { nonFieldErrors } from '../util/errutil' +import { Redirect } from 'react-router' +import Query from '../util/Query' +import FormDialog from '../dialogs/FormDialog' + +const query = gql` + query($id: ID!) { + rotation(id: $id) { + id + name + description + start + timeZone + type + } + } +` +const mutation = gql` + mutation($input: [TargetInput!]!) 
{ + deleteAll(input: $input) + } +` + +export default class RotationDeleteDialog extends React.PureComponent { + static propTypes = { + rotationID: p.string.isRequired, + onClose: p.func, + } + + render() { + return ( + this.renderMutation(data.rotation)} + /> + ) + } + + renderMutation(rotData) { + return ( + + {(commit, status) => this.renderDialog(rotData, commit, status)} + + ) + } + + renderDialog(rotData, commit, mutStatus) { + const { loading, error, data } = mutStatus + if (data && data.deleteAll) { + return + } + return ( + { + const input = [ + { + type: 'rotation', + id: this.props.rotationID, + }, + ] + return commit({ + variables: { + input, + }, + }) + }} + /> + ) + } +} diff --git a/web/src/app/rotations/RotationDetails.js b/web/src/app/rotations/RotationDetails.js new file mode 100644 index 0000000000..ebcf7e93e6 --- /dev/null +++ b/web/src/app/rotations/RotationDetails.js @@ -0,0 +1,112 @@ +import React from 'react' +import p from 'prop-types' +import gql from 'graphql-tag' +import PageActions from '../util/PageActions' +import Query from '../util/Query' +import OtherActions from '../util/OtherActions' +import CreateFAB from '../lists/CreateFAB' +import { handoffSummary } from './util' +import DetailsPage from '../details/DetailsPage' +import RotationEditDialog from './RotationEditDialog' +import RotationDeleteDialog from './RotationDeleteDialog' +import RotationUserList from './RotationUserList' +import RotationAddUserDialog from './RotationAddUserDialog' + +const query = gql` + query rotationDetails($rotationID: ID!) { + rotation(id: $rotationID) { + id + name + description + activeUserIndex + userIDs + type + shiftLength + timeZone + start + } + } +` + +const partialQuery = gql` + query($rotationID: ID!) 
{ + rotation(id: $rotationID) { + id + name + description + } + } +` + +export default class RotationDetails extends React.PureComponent { + static propTypes = { + rotationID: p.string.isRequired, + } + + state = { + value: null, + edit: false, + delete: false, + addUser: false, + } + + render() { + return ( + + ) + } + + renderData = ({ data }) => { + const summary = handoffSummary(data.rotation) + return ( + + + this.setState({ edit: true }), + }, + { + label: 'Delete Rotation', + onClick: () => this.setState({ delete: true }), + }, + ]} + /> + + } + /> + + this.setState({ addUser: true })} /> + {this.state.addUser && ( + this.setState({ addUser: false })} + /> + )} + {this.state.edit && ( + this.setState({ edit: false })} + rotationID={this.props.rotationID} + /> + )} + {this.state.delete && ( + this.setState({ delete: false })} + rotationID={this.props.rotationID} + /> + )} + + ) + } +} diff --git a/web/src/app/rotations/RotationEditDialog.js b/web/src/app/rotations/RotationEditDialog.js new file mode 100644 index 0000000000..d97ae3625b --- /dev/null +++ b/web/src/app/rotations/RotationEditDialog.js @@ -0,0 +1,102 @@ +import React from 'react' +import p from 'prop-types' +import { graphql2Client } from '../apollo' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { fieldErrors, nonFieldErrors } from '../util/errutil' +import Query from '../util/Query' +import FormDialog from '../dialogs/FormDialog' +import RotationForm from './RotationForm' + +const query = gql` + query($id: ID!) { + rotation(id: $id) { + id + name + description + timeZone + type + shiftLength + start + } + } +` + +const mutation = gql` + mutation($input: UpdateRotationInput!) 
{ + updateRotation(input: $input) + } +` + +export default class RotationEditDialog extends React.PureComponent { + static propTypes = { + rotationID: p.string.isRequired, + onClose: p.func, + } + + state = { + value: null, + } + + render() { + return ( + this.renderMutation(data.rotation)} + /> + ) + } + + renderMutation(data) { + return ( + + {(...args) => this.renderForm(data, ...args)} + + ) + } + + renderForm = (data, commit, status) => { + return ( + + commit({ + variables: { + input: { + id: this.props.rotationID, + ...this.state.value, + }, + }, + }) + } + form={ + this.setState({ value })} + /> + } + /> + ) + } +} diff --git a/web/src/app/rotations/RotationForm.js b/web/src/app/rotations/RotationForm.js new file mode 100644 index 0000000000..55124c5b2b --- /dev/null +++ b/web/src/app/rotations/RotationForm.js @@ -0,0 +1,162 @@ +import React from 'react' +import p from 'prop-types' +import { FormContainer, FormField } from '../forms' +import { TimePicker } from 'material-ui-pickers' +import { TimeZoneSelect } from '../selection' +import { TextField, Grid, MenuItem } from '@material-ui/core' +import { startCase } from 'lodash-es' +import { DateTime, Info } from 'luxon' + +const rotationTypes = ['hourly', 'daily', 'weekly'] + +export default class RotationForm extends React.PureComponent { + static propTypes = { + value: p.shape({ + name: p.string.isRequired, + description: p.string.isRequired, + timeZone: p.string.isRequired, + type: p.oneOf(rotationTypes).isRequired, + shiftLength: p.number.isRequired, + start: p.string.isRequired, + }).isRequired, + + errors: p.arrayOf( + p.shape({ + field: p.oneOf([ + 'name', + 'description', + 'timeZone', + 'type', + 'start', + 'shiftLength', + ]).isRequired, + message: p.string.isRequired, + }), + ), + + onChange: p.func.isRequired, + } + + onChange = values => { + if (!this.props.onChange) return + this.props.onChange({ + ...values, + }) + } + + render() { + return ( + + + + + + + + + + + + + + {rotationTypes.map(type 
=> ( + + {startCase(type)} + + ))} + + + + + + + value.toISO()} + /> + + {this.props.value.type === 'weekly' && this.renderDayOfWeekField()} + + + ) + } + + renderDayOfWeekField() { + return ( + + this.setDayOfWeek(e.target.value)} + > + {Info.weekdaysFormat('long').map((day, idx) => { + return ( + + {day} + + ) + })} + + + ) + } + + dayOfWeek() { + const { start, timeZone } = this.props.value + return DateTime.fromISO(start, { zone: timeZone }).weekday + } + + setDayOfWeek(weekday) { + const { start, timeZone, ...other } = this.props.value + this.props.onChange({ + ...other, + timeZone, + start: DateTime.fromISO(start, { zone: timeZone }) + .set({ weekday }) + .toISO(), + }) + } +} diff --git a/web/src/app/rotations/RotationRouter.js b/web/src/app/rotations/RotationRouter.js new file mode 100644 index 0000000000..de1be0e1ec --- /dev/null +++ b/web/src/app/rotations/RotationRouter.js @@ -0,0 +1,53 @@ +import React from 'react' +import gql from 'graphql-tag' +import { Switch, Route } from 'react-router-dom' +import { PageNotFound } from '../error-pages/Errors' +import RotationDetails from './RotationDetails' +import RotationCreateDialog from './RotationCreateDialog' +import SimpleListPage from '../lists/SimpleListPage' + +const query = gql` + query rotationsQuery($input: RotationSearchOptions) { + data: rotations(input: $input) { + nodes { + id + name + description + } + pageInfo { + hasNextPage + endCursor + } + } + } +` + +export default class RotationRouter extends React.PureComponent { + render() { + return ( + + + ( + + )} + /> + + + ) + } + + renderList = () => ( + ({ + title: n.name, + subText: n.description, + url: n.id, + })} + createForm={} + /> + ) +} diff --git a/web/src/app/rotations/RotationSetActiveDialog.js b/web/src/app/rotations/RotationSetActiveDialog.js new file mode 100644 index 0000000000..7ade054e64 --- /dev/null +++ b/web/src/app/rotations/RotationSetActiveDialog.js @@ -0,0 +1,82 @@ +import React from 'react' +import p from 'prop-types' 
+import gql from 'graphql-tag' +import { graphql2Client } from '../apollo' +import Query from '../util/Query' +import { Mutation } from 'react-apollo' +import FormDialog from '../dialogs/FormDialog' + +const query = gql` + query($id: ID!) { + rotation(id: $id) { + id + users { + id + name + } + activeUserIndex + } + } +` + +const mutation = gql` + mutation($input: UpdateRotationInput!) { + updateRotation(input: $input) + } +` +export default class RotationSetActiveDialog extends React.PureComponent { + static propTypes = { + rotationID: p.string.isRequired, + userIndex: p.number.isRequired, + onClose: p.func.isRequired, + } + + render() { + return ( + this.renderMutation(data.rotation)} + /> + ) + } + + renderMutation(data) { + return ( + + {commit => this.renderDialog(data, commit)} + + ) + } + + renderDialog(data, commit) { + const { users } = data + + return ( + { + return commit({ + variables: { + input: { + id: this.props.rotationID, + activeUserIndex: this.props.userIndex, + }, + }, + }) + }} + /> + ) + } +} diff --git a/web/src/app/rotations/RotationUserDeleteDialog.js b/web/src/app/rotations/RotationUserDeleteDialog.js new file mode 100644 index 0000000000..08ccc953da --- /dev/null +++ b/web/src/app/rotations/RotationUserDeleteDialog.js @@ -0,0 +1,84 @@ +import React from 'react' +import p from 'prop-types' +import gql from 'graphql-tag' +import { graphql2Client } from '../apollo' +import Query from '../util/Query' +import { Mutation } from 'react-apollo' +import FormDialog from '../dialogs/FormDialog' + +const query = gql` + query($id: ID!) { + rotation(id: $id) { + id + userIDs + users { + id + name + } + activeUserIndex + } + } +` + +const mutation = gql` + mutation($input: UpdateRotationInput!) 
{ + updateRotation(input: $input) + } +` +export default class RotationUserDeleteDialog extends React.PureComponent { + static propTypes = { + rotationID: p.string.isRequired, + userIndex: p.number.isRequired, + onClose: p.func.isRequired, + } + + render() { + return ( + this.renderMutation(data.rotation)} + /> + ) + } + + renderMutation(data) { + return ( + + {commit => this.renderDialog(data, commit)} + + ) + } + + renderDialog(data, commit) { + const { userIDs, users } = data + const { rotationID, userIndex, onClose } = this.props + + return ( + { + return commit({ + variables: { + input: { + id: rotationID, + userIDs: userIDs.filter((id, index) => index !== userIndex), + }, + }, + }) + }} + /> + ) + } +} diff --git a/web/src/app/rotations/RotationUserList.js b/web/src/app/rotations/RotationUserList.js new file mode 100644 index 0000000000..970db305ce --- /dev/null +++ b/web/src/app/rotations/RotationUserList.js @@ -0,0 +1,202 @@ +import React from 'react' +import p from 'prop-types' +import gql from 'graphql-tag' +import { graphql2Client } from '../apollo' +import FlatList from '../lists/FlatList' +import Query from '../util/Query' +import Card from '@material-ui/core/Card' +import CardContent from '@material-ui/core/CardContent' +import CardHeader from '@material-ui/core/CardHeader' +import { reorderList, calcNewActiveIndex } from './util' +import { Mutation } from 'react-apollo' +import OtherActions from '../util/OtherActions' +import CountDown from '../util/CountDown' +import RotationSetActiveDialog from './RotationSetActiveDialog' +import RotationUserDeleteDialog from './RotationUserDeleteDialog' +import { DateTime } from 'luxon' +import { UserAvatar } from '../util/avatar' + +const rotationUsersQuery = gql` + query rotationUsers($id: ID!) { + rotation(id: $id) { + id + users { + id + name + } + activeUserIndex + nextHandoffTimes + } + } +` + +const mutation = gql` + mutation updateRotation($input: UpdateRotationInput!) 
{ + updateRotation(input: $input) + } +` + +export default class RotationUserList extends React.PureComponent { + static propTypes = { + rotationID: p.string.isRequired, + } + + state = { + deleteIndex: null, + setActiveIndex: null, + } + + render() { + return ( + + + + + this.renderMutation(data)} + variables={{ id: this.props.rotationID }} + /> + + + {this.state.deleteIndex !== null && ( + this.setState({ deleteIndex: null })} + /> + )} + {this.state.setActiveIndex !== null && ( + this.setState({ setActiveIndex: null })} + /> + )} + + ) + } + + renderMutation(data) { + return ( + + {commit => this.renderList(data, commit)} + + ) + } + + renderList(data, commit) { + const { users, activeUserIndex, nextHandoffTimes } = data.rotation + + // duplicate first entry + const _nextHandoffTimes = (nextHandoffTimes || []) + .slice(0, 1) + .concat(nextHandoffTimes) + const handoff = users.map((u, index) => { + const handoffIndex = + (index + (users.length - activeUserIndex)) % users.length + const time = _nextHandoffTimes[handoffIndex] + if (!time) { + return null + } + + if (index === activeUserIndex) { + return ( + + ) + } else { + return ( + 'Starts at ' + + DateTime.fromISO(time).toLocaleString(DateTime.TIME_SIMPLE) + + ' ' + + DateTime.fromISO(time).toRelativeCalendar() + ) + } + }) + + return ( + ({ + title: u.name, + id: u.id, + highlight: index === activeUserIndex, + icon: , + subText: handoff[index], + action: ( + this.setState({ setActiveIndex: index }), + }, + { + label: 'Remove', + onClick: () => this.setState({ deleteIndex: index }), + }, + ]} + /> + ), + }))} + onReorder={(...args) => { + let updatedUsers = reorderList(users.map(u => u.id), ...args) + const newActiveIndex = calcNewActiveIndex(activeUserIndex, ...args) + const params = { id: this.props.rotationID, userIDs: updatedUsers } + + if (newActiveIndex !== -1) { + params.activeUserIndex = newActiveIndex + } + + return commit({ + variables: { input: params }, + update: (cache, response) => { + if 
(!response.data.updateRotation) { + return + } + const data = cache.readQuery({ + query: rotationUsersQuery, + variables: { id: this.props.rotationID }, + }) + + const users = reorderList(data.rotation.users, ...args) + + cache.writeQuery({ + query: rotationUsersQuery, + variables: { id: this.props.rotationID }, + data: { + ...data, + rotation: { + ...data.rotation, + activeUserIndex: + newActiveIndex === -1 + ? data.rotation.activeUserIndex + : newActiveIndex, + users, + }, + }, + }) + }, + optimisticResponse: { + __typename: 'Mutation', + updateRotation: true, + }, + }) + }} + /> + ) + } +} diff --git a/web/src/app/rotations/RotationUserListItem.js b/web/src/app/rotations/RotationUserListItem.js new file mode 100644 index 0000000000..90f6e40fe3 --- /dev/null +++ b/web/src/app/rotations/RotationUserListItem.js @@ -0,0 +1,88 @@ +import React from 'react' +import { PropTypes as p } from 'prop-types' +import ListItem from '@material-ui/core/ListItem' +import ListItemText from '@material-ui/core/ListItemText' +import { UserAvatar } from '../util/avatar' +import withStyles from '@material-ui/core/styles/withStyles' +import RotationUpdateDialog from './RotationUpdateDialog' +import OtherActions from '../util/OtherActions' + +const styles = { + activeUser: { + borderLeft: '6px solid #93ed94', + background: '#defadf', + width: '100%', + marginLeft: '0', + marginRight: '0', + }, + participantDragging: { + backgroundColor: '#ebebeb', + }, +} + +@withStyles(styles) +export default class RotationUserListItem extends React.PureComponent { + static propTypes = { + rotationID: p.string.isRequired, + userIDs: p.array.isRequired, + user: p.object, + index: p.number, + activeUserIndex: p.number.isRequired, + } + + state = { + active: -1, + delete: false, + } + + getActiveStyle = index => { + const { activeUserIndex, classes } = this.props + + // light green left border, lighter green background + return activeUserIndex === index ? 
classes.activeUser : null + } + + render() { + const { index, user } = this.props + return ( + + + + + this.setState({ active: index }), + }, + { + label: 'Remove', + onClick: () => this.setState({ delete: user.id }), + }, + ]} + /> + + + {this.state.active !== -1 && ( + this.setState({ active: -1 })} + activeUserIndex={this.state.active} + userIDs={this.props.userIDs} + /> + )} + {this.state.delete && ( + this.setState({ delete: false })} + userID={this.state.delete} + userIDs={this.props.userIDs} + /> + )} + + ) + } +} diff --git a/web/src/app/rotations/UserForm.js b/web/src/app/rotations/UserForm.js new file mode 100644 index 0000000000..f44595da1f --- /dev/null +++ b/web/src/app/rotations/UserForm.js @@ -0,0 +1,35 @@ +import React from 'react' +import { PropTypes as p } from 'prop-types' +import { FormContainer, FormField } from '../forms' +import { UserSelect } from '../selection' +import { Grid } from '@material-ui/core' + +export default class UserForm extends React.Component { + static propTypes = { + errors: p.array, + onChange: p.func, + disabled: p.bool, + value: p.shape({ + users: p.arrayOf(p.string), + }).isRequired, + } + + render() { + return ( + + + + + + ) + } +} diff --git a/web/src/app/rotations/util.js b/web/src/app/rotations/util.js new file mode 100644 index 0000000000..ccd13a6fd2 --- /dev/null +++ b/web/src/app/rotations/util.js @@ -0,0 +1,112 @@ +import { DateTime } from 'luxon' + +// calcNewActiveIndex returns the newActiveIndex for a swap operation +// -1 will be returned if there was no change +export function calcNewActiveIndex(oldActiveIndex, oldIndex, newIndex) { + if (oldIndex === newIndex) { + return -1 + } + if (oldActiveIndex === oldIndex) { + return newIndex + } + + if (oldIndex > oldActiveIndex && newIndex <= oldActiveIndex) { + return oldActiveIndex + 1 + } + + if (oldIndex < oldActiveIndex && newIndex >= oldActiveIndex) { + return oldActiveIndex - 1 + } + return -1 +} + +// formatTime returns the formatted time with the 
timezone (if different than local timezone) +export function formatTime(time, tz) { + const schedTime = DateTime.fromISO(time, { zone: tz }).toLocaleString( + DateTime.TIME_SIMPLE, + ) + + var localTime = DateTime.fromISO(time).toLocaleString(DateTime.TIME_SIMPLE) + + if (schedTime === localTime) { + return `${schedTime} ${tz}` + } + + return `${schedTime} ${tz} (${localTime} local)` +} + +// formatDay returns the day given a time and timezone +export function formatDay(time, tz) { + const day = DateTime.fromISO(time, { zone: tz }).weekdayLong + const localDay = DateTime.fromISO(time).weekdayLong + + if (day === localDay) { + return `${day}` + } + + return `${day} (${localDay})` +} + +// formatWeeklySummary returns the summary for a weekly rotation +// taking into consideration extra formatting needed if timezone does not match with local timezone +export function formatWeeklySummary(shiftLength, start, tz) { + let details = '' + const day = DateTime.fromISO(start, { zone: tz }).weekdayLong + const schedTime = DateTime.fromISO(start, { zone: tz }).toLocaleString( + DateTime.TIME_SIMPLE, + ) + const localDay = DateTime.fromISO(start).weekdayLong + const localTime = DateTime.fromISO(start).toLocaleString(DateTime.TIME_SIMPLE) + + details += 'Hands off ' + details += shiftLength === 1 ? 'weekly on' : `every ${shiftLength} weeks on` + details += ` ${day}` + ' at ' + schedTime + ' ' + tz + + if (day !== localDay || schedTime !== localTime) { + details += ' (' + localDay + ' at ' + localTime + ' local time)' + } + + details += '.' + + return details +} + +// handoffSummary returns the summary description for the rotation +export function handoffSummary(rotation) { + const tz = rotation.timeZone + + let details = '' + switch (rotation.type) { + case 'hourly': + details += 'First hand off time at ' + formatTime(rotation.start, tz) + details += + ', hands off every ' + + (rotation.shiftLength === 1 + ? 'hour' + : rotation.shiftLength + ' hours') + + '.' 
+ break + case 'daily': + details += 'Hands off ' + details += + rotation.shiftLength === 1 + ? 'daily at' + : `every ${rotation.shiftLength} days at` + details += ' ' + formatTime(rotation.start, tz) + '.' + break + case 'weekly': + details += formatWeeklySummary(rotation.shiftLength, rotation.start, tz) + break + } + + return details +} + +// reorderList will move an item from the oldIndex to the newIndex, preserving order +// returning the result as a new array. +export function reorderList(_items, oldIndex, newIndex) { + let items = _items.slice() + items.splice(oldIndex, 1) // remove 1 element from oldIndex position + items.splice(newIndex, 0, _items[oldIndex]) // add dest to newIndex position + return items +} diff --git a/web/src/app/rotations/util.test.js b/web/src/app/rotations/util.test.js new file mode 100644 index 0000000000..26c27556a6 --- /dev/null +++ b/web/src/app/rotations/util.test.js @@ -0,0 +1,161 @@ +import { calcNewActiveIndex, handoffSummary, reorderList } from './util' +import { Settings } from 'luxon' + +describe('calcNewActiveIndex', () => { + let oldZone = '' + beforeAll(() => { + // 01/02 03:04:05PM '06 -0700 + Settings.now = () => 1136239445 + oldZone = Settings.defaultZoneName + Settings.defaultZoneName = 'UTC' + }) + afterAll(() => { + Settings.now = () => Date.now() + Settings.defaultZone = oldZone + }) + const check = (aIdx, oldIdx, newIdx, exp) => { + expect(calcNewActiveIndex(aIdx, oldIdx, newIdx)).toBe(exp) + } + test('should return -1 when no change', () => { + check(0, 1, 2, -1) + check(0, 2, 1, -1) + check(3, 1, 2, -1) + check(3, 2, 1, -1) + check(0, 0, 0, -1) + }) + test('should return newIndex when active user is being dragged', () => { + check(1, 1, 2, 2) + check(2, 2, 1, 1) + }) + test('should return newIndex +1 ', () => { + check(0, 2, 0, 1) + check(1, 2, 0, 2) + }) + test('should return newIndex -1', () => { + check(1, 0, 2, 0) + check(1, 0, 1, 0) + }) +}) + +describe('handoffSummary', () => { + const check = (rotation, 
exp) => { + expect(handoffSummary(rotation)).toBe(exp) + } + + test('should be as per hourly rotation', () => { + check( + { + shiftLength: 1, + start: '2018-07-25T02:22:33Z', + timeZone: 'UTC', + type: 'hourly', + }, + 'First hand off time at 2:22 AM UTC, hands off every hour.', + ) + + check( + { + shiftLength: 1, + start: '2017-07-14T06:32:33Z', + timeZone: 'Asia/Kolkata', + type: 'hourly', + }, + 'First hand off time at 12:02 PM Asia/Kolkata (6:32 AM local), hands off every hour.', + ) + }) + + test('should be as per daily rotation', () => { + check( + { + shiftLength: 2, + start: '2018-02-25T09:10:22Z', + timeZone: 'America/Cancun', + type: 'daily', + }, + 'Hands off every 2 days at 4:10 AM America/Cancun (9:10 AM local).', + ) + + check( + { + shiftLength: 1, + start: '2017-07-14T06:32:33Z', + timeZone: 'UTC', + type: 'daily', + }, + 'Hands off daily at 6:32 AM UTC.', + ) + }) + + test('should be as per weekly rotation', () => { + check( + { + shiftLength: 2, + start: '2018-02-25T09:10:22Z', + timeZone: 'UTC', + type: 'weekly', + }, + 'Hands off every 2 weeks on Sunday at 9:10 AM UTC.', + ) + + check( + { + shiftLength: 2, + start: '2017-06-26T06:50:11Z', + timeZone: 'Asia/Kolkata', + type: 'weekly', + }, + 'Hands off every 2 weeks on Monday at 12:20 PM Asia/Kolkata (Monday at 6:50 AM local time).', + ) + }) +}) + +describe('reorderList', () => { + const check = (users, oldIdx, newIdx, exp) => { + expect(reorderList(users, oldIdx, newIdx)).toEqual(exp) + } + + test('should return reordered user list', () => { + check(['aaa', 'bbb', 'ccc'], 0, 0, ['aaa', 'bbb', 'ccc']) + check(['aaa', 'bbb', 'ccc'], 0, 1, ['bbb', 'aaa', 'ccc']) + check(['aaa', 'bbb', 'ccc'], 0, 2, ['bbb', 'ccc', 'aaa']) + check(['aaa', 'bbb', 'ccc'], 1, 0, ['bbb', 'aaa', 'ccc']) + check(['aaa', 'bbb', 'ccc'], 1, 1, ['aaa', 'bbb', 'ccc']) + check(['aaa', 'bbb', 'ccc'], 1, 2, ['aaa', 'ccc', 'bbb']) + check(['aaa', 'bbb', 'ccc'], 2, 0, ['ccc', 'aaa', 'bbb']) + check(['aaa', 'bbb', 'ccc'], 2, 1, 
['aaa', 'ccc', 'bbb']) + check(['aaa', 'bbb', 'ccc'], 2, 2, ['aaa', 'bbb', 'ccc']) + + check(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], 6, 0, [ + 'g', + 'a', + 'b', + 'c', + 'd', + 'e', + 'f', + 'h', + ]) + + check(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], 3, 7, [ + 'a', + 'b', + 'c', + 'e', + 'f', + 'g', + 'h', + 'd', + ]) + + check(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], 3, 0, [ + 'd', + 'a', + 'b', + 'c', + 'e', + 'f', + 'g', + 'h', + ]) + }) +}) diff --git a/web/src/app/schedules/CalendarEventWrapper.js b/web/src/app/schedules/CalendarEventWrapper.js new file mode 100644 index 0000000000..8c84107d3b --- /dev/null +++ b/web/src/app/schedules/CalendarEventWrapper.js @@ -0,0 +1,166 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import Button from '@material-ui/core/Button' +import IconButton from '@material-ui/core/IconButton' +import Grid from '@material-ui/core/Grid' +import RemoveIcon from '@material-ui/icons/Delete' +import Tooltip from '@material-ui/core/Tooltip' +import withStyles from '@material-ui/core/styles/withStyles' +import { connect } from 'react-redux' +import moment from 'moment' +import { urlParamSelector } from '../selectors' +import { DateTime, Duration } from 'luxon' + +const styles = theme => ({ + button: { + padding: '4px', + minHeight: 0, + fontSize: 12, + }, + buttonContainer: { + display: 'flex', + alignItems: 'center', + }, + flexGrow: { + flexGrow: 1, + }, + icon: { + color: theme.palette.primary['500'], + }, + tooltip: { + background: theme.palette.common.white, + color: theme.palette.text.primary, + boxShadow: theme.shadows[1], + fontSize: 12, + marginTop: '0.1em', + marginBottom: '0.1em', + }, + popper: { + opacity: 1, + }, +}) + +const mapStateToProps = state => { + // false: monthly, true: weekly + const weekly = urlParamSelector(state)('weekly', false) + let start = urlParamSelector(state)( + 'start', + DateTime.local() + .startOf('day') + .toISO(), + ) + + const activeOnly = 
urlParamSelector(state)('activeOnly', false) + if (activeOnly) { + start = DateTime.local().toISO() + } + + let end = DateTime.fromISO(start) + .plus(Duration.fromISO(weekly ? 'P7D' : 'P1M')) + .toISO() + + return { + start, + end, + userFilter: urlParamSelector(state)('userFilter', []), + activeOnly, + } +} + +@withStyles(styles) +@connect( + mapStateToProps, + null, +) +export default class CalendarEventWrapper extends Component { + static propTypes = { + event: p.object.isRequired, + onOverrideClick: p.func.isRequired, + } + + handleShowOverrideForm = type => { + const { event, onOverrideClick } = this.props + + onOverrideClick({ + variant: type, + defaultValue: { + start: event.start.toISOString(), + end: event.end.toISOString(), + removeUserID: event.userID, + }, + }) + } + + /* + * Renders an interactive tooltip when hovering + * over an event in the calendar that will show + * the full shift start and end date times, as + * well as the ability to replace or remove that + * shift as an override, if possible (not in the + * past). 
+ */ + renderInteractiveTooltip = () => { + const { classes, event } = this.props + + let overrideCtrls = null + if (moment(event.end).isAfter(moment())) { + overrideCtrls = ( + + + + + + + this.handleShowOverrideForm('remove')} + > + + + + + ) + } + + return ( + + + {moment(event.start).format('LLL')} + {' – '} + {moment(event.end).format('LLL')} + + {overrideCtrls} + + ) + } + + render() { + const { children, classes } = this.props + + return ( + + {children} + + ) + } +} diff --git a/web/src/app/schedules/CalendarToolbar.js b/web/src/app/schedules/CalendarToolbar.js new file mode 100644 index 0000000000..3d3edde938 --- /dev/null +++ b/web/src/app/schedules/CalendarToolbar.js @@ -0,0 +1,188 @@ +import React from 'react' +import { PropTypes as p } from 'prop-types' +import Button from '@material-ui/core/Button' +import Grid from '@material-ui/core/Grid' +import Typography from '@material-ui/core/Typography' +import withStyles from '@material-ui/core/styles/withStyles' +import moment from 'moment' +import { connect } from 'react-redux' +import { urlParamSelector } from '../selectors' + +const styles = { + abs: { + position: 'absolute', + }, + container: { + paddingBottom: '1em', + justifyContent: 'center', + alignItems: 'flex-end', + }, + flexGrow: { + flexGrow: 1, + }, + // borderRadius: top left, top right, bottom right, bottom left + today: { + borderRadius: '4px 0 0 4px', + }, + back: { + borderRadius: 0, + borderLeft: 0, + borderRight: 0, + }, + next: { + borderRadius: '0 4px 4px 0', + }, + month: { + borderRadius: '4px 0 0 4px', + }, + week: { + borderRadius: '0 4px 4px 0', + borderLeft: 0, + }, +} + +const mapStateToProps = state => ({ + weekly: urlParamSelector(state)('weekly', false), +}) + +@withStyles(styles) +@connect( + mapStateToProps, + null, +) +export default class CalendarToolbar extends React.PureComponent { + static propTypes = { + date: p.instanceOf(Date).isRequired, + label: p.string.isRequired, + onNavigate: p.func.isRequired, + 
onOverrideClick: p.func.isRequired, + onView: p.func.isRequired, + view: p.string.isRequired, + } + + /* + * Moves the calendar to the current day in + * respect of the current view type. + * + * e.g. Current day: March 22, while viewing + * April in monthly. Clicking "Today" would + * reset the calendar back to March. + */ + onTodayClick = e => { + this.props.onNavigate(e, moment().toDate()) + } + + /* + * Go backwards 1 week or 1 month, depending + * on the current view type. + */ + onBackClick = e => { + const { date, weekly } = this.props + const nextDate = weekly + ? moment(date) + .clone() + .subtract(1, 'week') + : moment(date) + .clone() + .subtract(1, 'month') + + this.props.onNavigate(e, nextDate.toDate()) + } + + /* + * Advance 1 week or 1 month, depending + * on the current view type. + */ + onNextClick = e => { + const { date, weekly } = this.props + + // either month or week + let dateCopy = moment(date).clone() + let nextDate = weekly ? dateCopy.add(1, 'week') : dateCopy.add(1, 'month') + this.props.onNavigate(e, nextDate.toDate()) + } + + /* + * Switches the calendar to a monthly view. + */ + onMonthClick = () => { + this.props.onView('month') + } + + /* + * Switches the calendar to a weekly view. + */ + onWeekClick = () => { + this.props.onView('week') + } + + render() { + const { classes, label, onOverrideClick, view } = this.props + + return ( + + + + + + + + + + + + + + + + + {label} + + + + ) + } +} diff --git a/web/src/app/schedules/ScheduleAssignedToList.js b/web/src/app/schedules/ScheduleAssignedToList.js new file mode 100644 index 0000000000..5ce6d383fe --- /dev/null +++ b/web/src/app/schedules/ScheduleAssignedToList.js @@ -0,0 +1,49 @@ +import React from 'react' +import p from 'prop-types' +import gql from 'graphql-tag' +import Query from '../util/Query' +import FlatList from '../lists/FlatList' +import { Grid, Card } from '@material-ui/core' + +const query = gql` + query($id: ID!) 
{ + schedule(id: $id) { + id + assignedTo { + id + type + name + } + } + } +` + +export default class ScheduleAssignedToList extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + } + render() { + return ( + + ) + } + renderList({ data }) { + return ( + + + ({ + title: t.name, + url: `/escalation-policies/${t.id}`, + }))} + emptyMessage='This schedule is not assigned to any escalation policies.' + /> + + + ) + } +} diff --git a/web/src/app/schedules/ScheduleCalendar.js b/web/src/app/schedules/ScheduleCalendar.js new file mode 100644 index 0000000000..1c12b17d8d --- /dev/null +++ b/web/src/app/schedules/ScheduleCalendar.js @@ -0,0 +1,274 @@ +import React from 'react' +import { PropTypes as p } from 'prop-types' +import Card from '@material-ui/core/Card' +import Typography from '@material-ui/core/Typography' +import withStyles from '@material-ui/core/styles/withStyles' +import { connect } from 'react-redux' +import moment from 'moment' +import BigCalendar from 'react-big-calendar' +import '../../node_modules/react-big-calendar/lib/css/react-big-calendar.css' +import CalendarEventWrapper from './CalendarEventWrapper' +import CalendarToolbar from './CalendarToolbar' +import ScheduleOverrideCreateDialog from './ScheduleOverrideCreateDialog' +import { resetURLParams, setURLParam } from '../actions' +import { urlParamSelector } from '../selectors' +import { DateTime, Interval } from 'luxon' + +const localizer = BigCalendar.momentLocalizer(moment) + +const styles = { + calendarContainer: { + padding: '1em', + }, +} + +const mapStateToProps = state => { + // false: monthly, true: weekly + const weekly = urlParamSelector(state)('weekly', false) + let start = urlParamSelector(state)( + 'start', + weekly + ? moment() + .startOf('week') + .toISOString() + : moment() + .startOf('month') + .toISOString(), + ) + + let end = moment(start) + .add(1, weekly ? 
'week' : 'month') + .toISOString() + + return { + start, + end, + weekly, + activeOnly: urlParamSelector(state)('activeOnly', false), + userFilter: urlParamSelector(state)('userFilter', []), + } +} + +const mapDispatchToProps = dispatch => { + return { + setWeekly: value => dispatch(setURLParam('weekly', value)), + setStart: value => dispatch(setURLParam('start', value)), + resetFilter: () => + dispatch( + resetURLParams('userFilter', 'start', 'activeOnly', 'tz', 'weekly'), + ), + } +} + +@withStyles(styles) +@connect( + mapStateToProps, + mapDispatchToProps, +) +export default class ScheduleCalendar extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + shifts: p.array.isRequired, + } + + state = { + /* + * overrideDialog should be either an object of + * the dialog properties to use, or null to close + * the dialog. + */ + overrideDialog: null, + } + + /* + * Offsets the calendar forward or backwards + * a week or month, depending on the current + * view type. + */ + onNavigate = nextDate => { + if (this.props.weekly) { + this.props.setStart( + moment(nextDate) + .startOf('week') + .startOf('day') + .toISOString(), + ) + } else { + this.props.setStart( + moment(nextDate) + .startOf('month') + .startOf('day') + .toISOString(), + ) + } + } + + /* + * Resets the start date to the beginning of the month + * when switching views. + * + * e.g. Monthly: February -> Weekly: Start at the week + * of February 1st + * + * e.g. Weekly: February 17-23 -> Monthly: Start at the + * beginning of February + * + * If viewing the current month however, show the current + * week. 
+ */ + onView = nextView => { + const start = this.props.start + const prevStartMonth = moment(start).month() + const currMonth = moment().month() + + // if viewing the current month, show the current week + if (nextView === 'week' && prevStartMonth === currMonth) { + this.props.setWeekly(true) + this.props.setStart( + moment() + .startOf('week') + .toISOString(), + ) + + // if not on the current month, show the first week of the month + } else if (nextView === 'week' && prevStartMonth !== currMonth) { + this.props.setWeekly(true) + this.props.setStart( + moment(this.props.start) + .startOf('month') + .startOf('week') + .toISOString(), + ) + + // go from week to monthly view + // e.g. if navigating to an overlap of two months such as + // Jan 27 - Feb 2, show the latter month (February) + } else { + this.props.setWeekly(false) + + this.props.setStart( + moment(start) + .endOf('week') + .startOf('month') + .toISOString(), + ) + } + } + + /* + * Return a GoAlert dog red color for the events, and a slightly + * darker version of that red if selected + */ + eventStyleGetter = (event, start, end, isSelected) => { + return { + style: { + backgroundColor: isSelected ? '#8f1022' : '#cd1831', + borderColor: '#8f1022', + }, + } + } + + /* + * Return a light red shade of the current date instead of + * the default light blue + */ + dayPropGetter = date => { + if (moment(date).isSame(moment(), 'd')) { + return { + style: { + backgroundColor: '#FFECEC', + }, + } + } + } + + render() { + const { classes, shifts, start, weekly } = this.props + + // fill available doesn't work in weekly view + const height = weekly ? '100%' : '-webkit-fill-available' + + return ( + + + + Times shown are in{' '} + {Intl.DateTimeFormat().resolvedOptions().timeZone} + + + +
    + null} + views={['month', 'week']} + view={weekly ? 'week' : 'month'} + popup + eventPropGetter={this.eventStyleGetter} + dayPropGetter={this.dayPropGetter} + onNavigate={this.onNavigate} + onView={this.onView} + components={{ + eventWrapper: props => ( + + this.setState({ overrideDialog }) + } + {...props} + /> + ), + toolbar: props => ( + + this.setState({ overrideDialog: { variant: 'add' } }) + } + {...props} + /> + ), + }} + /> +
    +
    + {Boolean(this.state.overrideDialog) && ( + this.setState({ overrideDialog: null })} + /> + )} +
    + ) + } + + getCalEvents = shifts => { + // if any users in users array, only show the ids present + let filteredShifts = shifts.slice() + if (this.props.userFilter.length > 0) { + filteredShifts = filteredShifts.filter(shift => + this.props.userFilter.includes(shift.user.id), + ) + } + + if (this.props.activeOnly) { + filteredShifts = filteredShifts.filter(shift => + Interval.fromDateTimes( + DateTime.fromISO(shift.start), + DateTime.fromISO(shift.end), + ).contains(DateTime.local()), + ) + } + + return filteredShifts.map(shift => { + return { + title: shift.user.name, + userID: shift.user.id, + start: new Date(shift.start), + end: new Date(shift.end), + } + }) + } +} diff --git a/web/src/app/schedules/ScheduleCalendarQuery.js b/web/src/app/schedules/ScheduleCalendarQuery.js new file mode 100644 index 0000000000..739726efc3 --- /dev/null +++ b/web/src/app/schedules/ScheduleCalendarQuery.js @@ -0,0 +1,81 @@ +import React from 'react' +import gql from 'graphql-tag' +import Query from '../util/Query' +import ScheduleCalendar from './ScheduleCalendar' +import { urlParamSelector } from '../selectors' +import { connect } from 'react-redux' +import withWidth, { isWidthDown } from '@material-ui/core/withWidth/index' +import moment from 'moment/moment' + +const query = gql` + query scheduleCalendarShifts( + $id: ID! + $start: ISOTimestamp! + $end: ISOTimestamp! + ) { + schedule(id: $id) { + id + shifts(start: $start, end: $end) { + user { + id + name + } + start + end + truncated + } + } + } +` + +const mapStateToProps = state => { + // false: monthly, true: weekly + const weekly = urlParamSelector(state)('weekly', false) + let start = urlParamSelector(state)( + 'start', + weekly + ? moment() + .startOf('week') + .toISOString() + : moment() + .startOf('month') + .toISOString(), + ) + + let end = moment(start) + .add(1, weekly ? 
'week' : 'month') + .toISOString() + + return { + start, + end, + } +} + +@withWidth() +@connect( + mapStateToProps, + null, +) +export default class ScheduleCalendarQuery extends React.PureComponent { + render() { + if (isWidthDown('sm', this.props.width)) return null + + return ( + ( + + )} + /> + ) + } +} diff --git a/web/src/app/schedules/ScheduleCreateDialog.js b/web/src/app/schedules/ScheduleCreateDialog.js new file mode 100644 index 0000000000..69ac1fe5c8 --- /dev/null +++ b/web/src/app/schedules/ScheduleCreateDialog.js @@ -0,0 +1,73 @@ +import React from 'react' +import FormDialog from '../dialogs/FormDialog' +import ScheduleForm from './ScheduleForm' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { graphql2Client } from '../apollo' +import { nonFieldErrors, fieldErrors } from '../util/errutil' +import { Redirect } from 'react-router' + +const mutation = gql` + mutation($input: CreateScheduleInput!) { + createSchedule(input: $input) { + id + name + description + timeZone + } + } +` + +export default class ScheduleCreateDialog extends React.PureComponent { + state = { + value: { + name: '', + description: '', + timeZone: Intl.DateTimeFormat().resolvedOptions().timeZone, + }, + } + render() { + return ( + + {this.renderForm} + + ) + } + renderForm = (commit, status) => { + if (status.data && status.data.createSchedule) { + return ( + + ) + } + return ( + + commit({ + variables: { + input: { + ...this.state.value, + targets: [ + { + target: { type: 'user', id: '__current_user' }, + rules: [{}], + }, + ], + }, + }, + }) + } + form={ + this.setState({ value })} + /> + } + /> + ) + } +} diff --git a/web/src/app/schedules/ScheduleDeleteDialog.js b/web/src/app/schedules/ScheduleDeleteDialog.js new file mode 100644 index 0000000000..75da37e8af --- /dev/null +++ b/web/src/app/schedules/ScheduleDeleteDialog.js @@ -0,0 +1,82 @@ +import React from 'react' +import p from 'prop-types' + +import { graphql2Client } from '../apollo' +import 
gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { nonFieldErrors } from '../util/errutil' +import { Redirect } from 'react-router' +import Query from '../util/Query' + +import FormDialog from '../dialogs/FormDialog' + +const query = gql` + query($id: ID!) { + schedule(id: $id) { + id + name + } + } +` +const mutation = gql` + mutation delete($input: [TargetInput!]!) { + deleteAll(input: $input) + } +` + +export default class ScheduleDeleteDialog extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + onClose: p.func, + } + + state = { + deleteEP: true, + } + + render() { + return ( + this.renderMutation(data.schedule)} + /> + ) + } + + renderMutation(data) { + return ( + + {(commit, status) => this.renderDialog(data, commit, status)} + + ) + } + + renderDialog(data, commit, mutStatus) { + const { loading, error, data: mutData } = mutStatus + if (mutData && mutData.deleteAll) { + return + } + + return ( + { + return commit({ + variables: { + input: [{ type: 'schedule', id: this.props.scheduleID }], + }, + }) + }} + /> + ) + } +} diff --git a/web/src/app/schedules/ScheduleDetails.js b/web/src/app/schedules/ScheduleDetails.js new file mode 100644 index 0000000000..35cfed363c --- /dev/null +++ b/web/src/app/schedules/ScheduleDetails.js @@ -0,0 +1,151 @@ +import React from 'react' +import p from 'prop-types' +import gql from 'graphql-tag' +import FormControlLabel from '@material-ui/core/FormControlLabel' +import Grid from '@material-ui/core/Grid' +import Switch from '@material-ui/core/Switch' +import DetailsPage from '../details/DetailsPage' +import Query from '../util/Query' +import { UserSelect } from '../selection' +import FilterContainer from '../util/FilterContainer' +import PageActions from '../util/PageActions' +import OtherActions from '../util/OtherActions' +import ScheduleEditDialog from './ScheduleEditDialog' +import ScheduleDeleteDialog from './ScheduleDeleteDialog' +import ScheduleCalendarQuery from 
'./ScheduleCalendarQuery' +import { urlParamSelector } from '../selectors' +import { resetURLParams, setURLParam } from '../actions' +import { connect } from 'react-redux' + +const query = gql` + query($id: ID!) { + schedule(id: $id) { + id + name + description + timeZone + } + } +` +const partialQuery = gql` + query($id: ID!) { + schedule(id: $id) { + id + name + description + } + } +` + +const mapStateToProps = state => ({ + userFilter: urlParamSelector(state)('userFilter', []), + activeOnly: urlParamSelector(state)('activeOnly', false), +}) + +const mapDispatchToProps = dispatch => { + return { + setUserFilter: value => dispatch(setURLParam('userFilter', value)), + setActiveOnly: value => dispatch(setURLParam('activeOnly', value)), + resetFilter: () => + dispatch( + resetURLParams('userFilter', 'start', 'activeOnly', 'tz', 'duration'), + ), + } +} + +@connect( + mapStateToProps, + mapDispatchToProps, +) +export default class ScheduleDetails extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + } + + state = { + edit: false, + delete: false, + } + + render() { + return ( + this.renderPage(data.schedule)} + /> + ) + } + + renderPage = data => { + return ( + + + this.props.resetFilter()}> + + this.props.setActiveOnly(e.target.checked)} + value='activeOnly' + /> + } + label='Active shifts only' + /> + + + + + + this.setState({ edit: true }), + }, + { + label: 'Delete Schedule', + onClick: () => this.setState({ delete: true }), + }, + ]} + /> + + Time Zone: {data.timeZone} + } + links={[ + { label: 'Assignments', url: 'assignments' }, + { label: 'Escalation Policies', url: 'escalation-policies' }, + { label: 'Overrides', url: 'overrides' }, + { label: 'Shifts', url: 'shifts' }, + ]} + pageFooter={ + + } + /> + {this.state.edit && ( + this.setState({ edit: false })} + /> + )} + {this.state.delete && ( + this.setState({ delete: false })} + /> + )} + + ) + } +} diff --git a/web/src/app/schedules/ScheduleEditDialog.js 
b/web/src/app/schedules/ScheduleEditDialog.js new file mode 100644 index 0000000000..2bee61e38d --- /dev/null +++ b/web/src/app/schedules/ScheduleEditDialog.js @@ -0,0 +1,96 @@ +import React from 'react' +import p from 'prop-types' +import FormDialog from '../dialogs/FormDialog' +import ScheduleForm from './ScheduleForm' +import gql from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { graphql2Client } from '../apollo' +import { nonFieldErrors, fieldErrors } from '../util/errutil' +import Query from '../util/Query' + +const query = gql` + query($id: ID!) { + schedule(id: $id) { + id + name + description + timeZone + } + } +` + +const mutation = gql` + mutation($input: UpdateScheduleInput!) { + updateSchedule(input: $input) + } +` + +export default class ScheduleEditDialog extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + onClose: p.func, + } + state = { + value: null, + } + render() { + return ( + this.renderMutation(data.schedule)} + /> + ) + } + renderMutation(data) { + return ( + [ + { + query, + variables: { id: this.props.scheduleID }, + }, + ]} + > + {(...args) => this.renderForm(data, ...args)} + + ) + } + renderForm = (data, commit, status) => { + return ( + + commit({ + variables: { + input: { + id: this.props.scheduleID, + ...this.state.value, + }, + }, + }) + } + form={ + this.setState({ value })} + /> + } + /> + ) + } +} diff --git a/web/src/app/schedules/ScheduleForm.js b/web/src/app/schedules/ScheduleForm.js new file mode 100644 index 0000000000..30990b5563 --- /dev/null +++ b/web/src/app/schedules/ScheduleForm.js @@ -0,0 +1,58 @@ +import React from 'react' +import p from 'prop-types' +import { FormContainer, FormField } from '../forms' +import { TextField, Grid } from '@material-ui/core' +import { TimeZoneSelect } from '../selection' + +export default class ScheduleForm extends React.PureComponent { + static propTypes = { + value: p.shape({ + name: p.string.isRequired, + description: 
p.string.isRequired, + timeZone: p.string.isRequired, + }).isRequired, + + errors: p.arrayOf( + p.shape({ + field: p.oneOf(['name', 'description', 'timeZone']).isRequired, + message: p.string.isRequired, + }), + ), + + onChange: p.func.isRequired, + } + render() { + return ( + + + + + + + + + + + + ) + } +} diff --git a/web/src/app/schedules/ScheduleNewOverrideFAB.js b/web/src/app/schedules/ScheduleNewOverrideFAB.js new file mode 100644 index 0000000000..b09d94b2a1 --- /dev/null +++ b/web/src/app/schedules/ScheduleNewOverrideFAB.js @@ -0,0 +1,32 @@ +import React from 'react' +import p from 'prop-types' +import SpeedDial from '../util/SpeedDial' +import { AccountSwitch, AccountMinus, AccountPlus } from 'mdi-material-ui' + +export default class ScheduleNewOverrideFAB extends React.PureComponent { + static propsTypes = { + onClick: p.func.isRequired, + } + + actions = [ + { + label: 'Temporarily Replace a User', + onClick: () => this.props.onClick('replace'), + icon: , + }, + { + label: 'Temporarily Remove a User', + onClick: () => this.props.onClick('remove'), + icon: , + }, + { + label: 'Temporarily Add a User', + onClick: () => this.props.onClick('add'), + icon: , + }, + ] + + render() { + return + } +} diff --git a/web/src/app/schedules/ScheduleOverrideCreateDialog.js b/web/src/app/schedules/ScheduleOverrideCreateDialog.js new file mode 100644 index 0000000000..d40e2803ed --- /dev/null +++ b/web/src/app/schedules/ScheduleOverrideCreateDialog.js @@ -0,0 +1,122 @@ +import React from 'react' +import p from 'prop-types' +import { Mutation } from 'react-apollo' +import FormDialog from '../dialogs/FormDialog' +import { DateTime } from 'luxon' +import ScheduleOverrideForm from './ScheduleOverrideForm' +import { fieldErrors, nonFieldErrors } from '../util/errutil' +import gql from 'graphql-tag' +import { graphql2Client } from '../apollo' + +const copyText = { + add: { + title: 'Temporarily Add a User', + desc: + 'This will add a new shift for the selected user, while the 
override is active. Existing shifts will remain unaffected.', + }, + remove: { + title: 'Temporarily Remove a User', + desc: + 'This will remove (or split/shorten) shifts belonging to the selected user, while the override is active.', + }, + replace: { + title: 'Temporarily Replace a User', + desc: + 'This will replace the selected user with another during any existing shifts, while the override is active. No new shifts will be created, only who is on-call will be changed.', + }, +} + +const mutation = gql` + mutation($input: CreateUserOverrideInput!) { + createUserOverride(input: $input) { + id + } + } +` + +export default class ScheduleOverrideCreateDialog extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + variant: p.oneOf(['add', 'remove', 'replace']).isRequired, + onClose: p.func, + defaultValue: p.shape({ + addUserID: p.string, + removeUserID: p.string, + start: p.string, + end: p.string, + }), + } + + static defaultProps = { + defaultValue: {}, + } + + constructor(props) { + super(props) + + this.state = { + value: { + addUserID: '', + removeUserID: '', + start: DateTime.local() + .startOf('hour') + .toISO(), + end: DateTime.local() + .startOf('hour') + .plus({ hours: 8 }) + .toISO(), + ...props.defaultValue, + }, + } + } + + render() { + return ( + + {this.renderDialog} + + ) + } + + renderDialog = (commit, status) => { + return ( + + commit({ + variables: { + input: { + ...this.state.value, + scheduleID: this.props.scheduleID, + }, + }, + }) + } + form={ + this.setState({ value })} + /> + } + /> + ) + } +} diff --git a/web/src/app/schedules/ScheduleOverrideDeleteDialog.js b/web/src/app/schedules/ScheduleOverrideDeleteDialog.js new file mode 100644 index 0000000000..0b86157e21 --- /dev/null +++ b/web/src/app/schedules/ScheduleOverrideDeleteDialog.js @@ -0,0 +1,109 @@ +import React from 'react' +import p from 'prop-types' + +import { graphql2Client } from '../apollo' +import { connect } from 'react-redux' +import gql 
from 'graphql-tag' +import { Mutation } from 'react-apollo' +import { nonFieldErrors } from '../util/errutil' +import Query from '../util/Query' +import { Typography } from '@material-ui/core' +import FormDialog from '../dialogs/FormDialog' +import { urlParamSelector } from '../selectors' +import { formatOverrideTime } from './util' + +const query = gql` + query($id: ID!) { + userOverride(id: $id) { + id + start + end + addUser { + id + name + } + removeUser { + id + name + } + } + } +` + +const mutation = gql` + mutation($id: ID!) { + deleteAll(input: [{ type: userOverride, id: $id }]) + } +` + +@connect(state => ({ zone: urlParamSelector(state)('tz') })) +export default class ScheduleOverrideDeleteDialog extends React.PureComponent { + static propTypes = { + overrideID: p.string.isRequired, + onClose: p.func, + } + + renderQuery() { + return ( + this.renderMutation(data.userOverride)} + /> + ) + } + + renderMutation(data) { + return ( + + {(commit, status) => this.renderDialog(data, commit, status)} + + ) + } + + renderDialog(data, commit, mutStatus) { + const { loading, error } = mutStatus + + const zone = this.props.zone + const isReplace = data.addUser && data.removeUser + const verb = data.addUser ? 'Added' : 'Removed' + + const time = formatOverrideTime(data.start, data.end, zone) + + const caption = isReplace + ? 
`Replaced ${data.removeUser.name} from ${time}` + : `${verb} from ${time}` + return ( + { + return commit({ + variables: { + id: this.props.overrideID, + }, + }) + }} + form={{caption}} + /> + ) + } + + render() { + return this.renderQuery() + } +} diff --git a/web/src/app/schedules/ScheduleOverrideEditDialog.js b/web/src/app/schedules/ScheduleOverrideEditDialog.js new file mode 100644 index 0000000000..a7bda72786 --- /dev/null +++ b/web/src/app/schedules/ScheduleOverrideEditDialog.js @@ -0,0 +1,115 @@ +import React from 'react' +import p from 'prop-types' +import { Mutation } from 'react-apollo' +import FormDialog from '../dialogs/FormDialog' +import ScheduleOverrideForm from './ScheduleOverrideForm' +import { fieldErrors, nonFieldErrors } from '../util/errutil' +import gql from 'graphql-tag' +import { graphql2Client } from '../apollo' +import Query from '../util/Query' + +const query = gql` + query($id: ID!) { + userOverride(id: $id) { + id + start + end + target { + id + } + addUser { + id + } + removeUser { + id + } + } + } +` +const mutation = gql` + mutation($input: UpdateUserOverrideInput!) { + updateUserOverride(input: $input) + } +` + +export default class ScheduleOverrideEditDialog extends React.PureComponent { + static propTypes = { + overrideID: p.string.isRequired, + onClose: p.func, + } + + state = { + value: null, + } + + render() { + return ( + this.renderMutation(data.userOverride)} + /> + ) + } + renderMutation(data) { + return ( + + {(commit, status) => this.renderDialog(data, commit, status)} + + ) + } + getValue(data) { + if (this.state.value) return this.state.value + const value = { + start: data.start, + end: data.end, + } + + value.addUserID = data.addUser ? data.addUser.id : '' + value.removeUserID = data.removeUser ? 
data.removeUser.id : '' + + return value + } + renderDialog(data, commit, status) { + return ( + { + if (this.state.value === null) { + this.props.onClose() + return + } + commit({ + variables: { + input: { + ...this.state.value, + id: this.props.overrideID, + }, + }, + }) + }} + form={ + this.setState({ value })} + /> + } + /> + ) + } +} diff --git a/web/src/app/schedules/ScheduleOverrideForm.js b/web/src/app/schedules/ScheduleOverrideForm.js new file mode 100644 index 0000000000..dd4c266e88 --- /dev/null +++ b/web/src/app/schedules/ScheduleOverrideForm.js @@ -0,0 +1,200 @@ +import React from 'react' +import p from 'prop-types' +import { FormContainer, FormField } from '../forms' +import { + Grid, + InputAdornment, + IconButton, + Typography, + withStyles, +} from '@material-ui/core' +import { ScheduleTZFilter } from './ScheduleTZFilter' +import { connect } from 'react-redux' +import { urlParamSelector } from '../selectors' +import { DateRange, ChevronRight, ChevronLeft } from '@material-ui/icons' +import { DateTimePicker } from 'material-ui-pickers' +import { DateTime } from 'luxon' +import { UserSelect } from '../selection' +import Query from '../util/Query' +import gql from 'graphql-tag' +import { mapOverrideUserError } from './util' +import DialogContentError from '../dialogs/components/DialogContentError' + +const query = gql` + query($id: ID!) 
{ + userOverride(id: $id) { + id + addUser { + id + name + } + removeUser { + id + name + } + start + end + } + } +` + +const styles = theme => ({ + tzNote: { + display: 'flex', + alignItems: 'center', + }, +}) + +@connect(state => ({ zone: urlParamSelector(state)('tz', 'local') })) +@withStyles(styles) +export default class ScheduleOverrideForm extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + + value: p.shape({ + addUserID: p.string.isRequired, + removeUserID: p.string.isRequired, + start: p.string.isRequired, + end: p.string.isRequired, + }).isRequired, + + add: p.bool, + remove: p.bool, + + errors: p.arrayOf( + p.shape({ + field: p.oneOf(['addUserID', 'removeUserID', 'userID', 'start', 'end']) + .isRequired, + message: p.string.isRequired, + }), + ), + + onChange: p.func.isRequired, + } + + render() { + const userError = this.props.errors.find(e => e.field === 'userID') + return ( + this.renderForm(data)} + /> + ) + } + + renderForm(data) { + const { add, remove, zone, errors, value, ...formProps } = this.props + const userError = errors.find(e => e.field === 'userID') + const formErrors = errors + .filter(e => e.field !== 'userID') + .concat( + userError ? mapOverrideUserError(data.userOverride, value, zone) : [], + ) + + return ( + + + + + Start and end time shown in{' '} + {zone === 'local' ? 'local time' : zone}. + + + + {/* Purposefully leaving out of form, as it's only used for converting display times. 
*/} + `Configure in ${tz}`} + scheduleID={this.props.scheduleID} + /> + + {remove && ( + + + + )} + {add && ( + + + + )} + + DateTime.fromISO(value, { zone })} + mapOnChangeValue={value => value.toISO()} + showTodayButton + required + name='start' + leftArrowIcon={} + rightArrowIcon={} + InputProps={{ + endAdornment: ( + + + + + + ), + }} + /> + + + DateTime.fromISO(value, { zone })} + mapOnChangeValue={value => value.toISO()} + showTodayButton + name='end' + required + leftArrowIcon={} + rightArrowIcon={} + InputProps={{ + endAdornment: ( + + + + + + ), + }} + /> + + {userError && } + + + ) + } +} diff --git a/web/src/app/schedules/ScheduleOverrideList.js b/web/src/app/schedules/ScheduleOverrideList.js new file mode 100644 index 0000000000..7c8d1ab57c --- /dev/null +++ b/web/src/app/schedules/ScheduleOverrideList.js @@ -0,0 +1,196 @@ +import React from 'react' +import p from 'prop-types' +import PageActions from '../util/PageActions' +import { Grid, FormControlLabel, Switch } from '@material-ui/core' +import QueryList from '../lists/QueryList' +import gql from 'graphql-tag' +import { UserAvatar } from '../util/avatar' +import OtherActions from '../util/OtherActions' +import FilterContainer from '../util/FilterContainer' +import { UserSelect } from '../selection' +import { connect } from 'react-redux' +import { setURLParam, resetURLParams } from '../actions' +import { urlParamSelector } from '../selectors' +import { ScheduleTZFilter } from './ScheduleTZFilter' +import ScheduleOverrideCreateDialog from './ScheduleOverrideCreateDialog' +import ScheduleNewOverrideFAB from './ScheduleNewOverrideFAB' +import ScheduleOverrideDeleteDialog from './ScheduleOverrideDeleteDialog' +import { formatOverrideTime } from './util' +import ScheduleOverrideEditDialog from './ScheduleOverrideEditDialog' + +// the query name `scheduleOverrides` is used for refetch queries +const query = gql` + query scheduleOverrides($input: UserOverrideSearchOptions) { + userOverrides(input: $input) { 
+ nodes { + id + start + end + addUser { + id + name + } + removeUser { + id + name + } + } + + pageInfo { + hasNextPage + endCursor + } + } + } +` + +const mapStateToProps = state => { + return { + userFilter: urlParamSelector(state)('userFilter', []), + showPast: urlParamSelector(state)('showPast', false), + zone: urlParamSelector(state)('tz', 'local'), + } +} +const mapDispatchToProps = dispatch => { + return { + setZone: value => dispatch(setURLParam('tz', value, 'local')), + setUserFilter: value => dispatch(setURLParam('userFilter', value)), + setShowPast: value => dispatch(setURLParam('showPast', value)), + resetFilter: () => dispatch(resetURLParams('userFilter', 'showPast', 'tz')), + } +} + +@connect( + mapStateToProps, + mapDispatchToProps, +) +export default class ScheduleOverrideList extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + } + state = { + editID: null, + deleteID: null, + create: null, + } + + render() { + const { zone } = this.props + + const subText = n => { + const timeStr = formatOverrideTime(n.start, n.end, zone) + if (n.addUser && n.removeUser) { + // replace + return `Replaces ${n.removeUser.name} from ${timeStr}` + } + if (n.addUser) { + // add + return `Added from ${timeStr}` + } + // remove + return `Removed from ${timeStr}` + } + + const zoneText = zone === 'local' ? 'local time' : zone + const hasUsers = Boolean(this.props.userFilter.length) + const note = this.props.showPast + ? `Showing all overrides${ + hasUsers ? ' for selected users' : '' + } in ${zoneText}.` + : `Showing active and future overrides${ + hasUsers ? ' for selected users' : '' + } in ${zoneText}.` + + return ( + + + this.props.resetFilter()}> + + this.props.setShowPast(e.target.checked)} + value='showPast' + /> + } + label='Show past overrides' + /> + + + + + + + + + this.setState({ create: variant })} + /> + + + ({ + title: n.addUser ? 
n.addUser.name : n.removeUser.name, + subText: subText(n), + icon: ( + + ), + action: ( + this.setState({ editID: n.id }), + }, + { + label: 'Delete', + onClick: () => this.setState({ deleteID: n.id }), + }, + ]} + /> + ), + })} + variables={{ + input: { + scheduleID: this.props.scheduleID, + start: this.props.showPast ? null : new Date().toISOString(), + filterAnyUserID: this.props.userFilter, + }, + }} + /> + + {this.state.create && ( + this.setState({ create: null })} + /> + )} + {this.state.deleteID && ( + this.setState({ deleteID: null })} + /> + )} + {this.state.editID && ( + this.setState({ editID: null })} + /> + )} + + ) + } +} diff --git a/web/src/app/schedules/ScheduleRouter.js b/web/src/app/schedules/ScheduleRouter.js new file mode 100644 index 0000000000..a7ccb04deb --- /dev/null +++ b/web/src/app/schedules/ScheduleRouter.js @@ -0,0 +1,87 @@ +import React from 'react' +import gql from 'graphql-tag' +import { Switch, Route } from 'react-router-dom' +import ScheduleCreateDialog from './ScheduleCreateDialog' +import ScheduleDetails from './ScheduleDetails' +import ScheduleOverrideList from './ScheduleOverrideList' +import ScheduleAssignedToList from './ScheduleAssignedToList' +import ScheduleShiftList from './ScheduleShiftList' + +import { PageNotFound } from '../error-pages/Errors' +import ScheduleRuleList from './ScheduleRuleList' +import SimpleListPage from '../lists/SimpleListPage' + +const query = gql` + query schedulesQuery($input: ScheduleSearchOptions) { + data: schedules(input: $input) { + nodes { + id + name + description + } + pageInfo { + hasNextPage + endCursor + } + } + } +` +class ScheduleList extends React.PureComponent { + render() { + return ( + ({ + title: n.name, + subText: n.description, + url: n.id, + })} + createForm={} + /> + ) + } +} + +export default class ScheduleRouter extends React.PureComponent { + render() { + return ( + + + ( + + )} + /> + + ( + + )} + /> + ( + + )} + /> + ( + + )} + /> + ( + + )} + /> + + + + ) + } +} diff 
--git a/web/src/app/schedules/ScheduleRuleCreateDialog.js b/web/src/app/schedules/ScheduleRuleCreateDialog.js new file mode 100644 index 0000000000..e1445db203 --- /dev/null +++ b/web/src/app/schedules/ScheduleRuleCreateDialog.js @@ -0,0 +1,85 @@ +import React from 'react' +import p from 'prop-types' +import { Mutation } from 'react-apollo' +import FormDialog from '../dialogs/FormDialog' +import ScheduleRuleForm from './ScheduleRuleForm' +import { fieldErrors, nonFieldErrors } from '../util/errutil' +import gql from 'graphql-tag' +import { graphql2Client } from '../apollo' +import { startCase } from 'lodash-es' + +const mutation = gql` + mutation($input: ScheduleTargetInput!) { + updateScheduleTarget(input: $input) + } +` + +export default class ScheduleRuleCreateDialog extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + targetType: p.oneOf(['rotation', 'user']).isRequired, + onClose: p.func, + } + + state = { + value: { + targetID: '', + rules: [ + { + start: '00:00', + end: '00:00', + weekdayFilter: [true, true, true, true, true, true, true], + }, + ], + }, + } + render() { + return ( + + {this.renderDialog} + + ) + } + + renderDialog = (commit, status) => { + return ( + { + commit({ + variables: { + input: { + target: { + type: this.props.targetType, + id: this.state.value.targetID, + }, + scheduleID: this.props.scheduleID, + + rules: this.state.value.rules, + }, + }, + }) + }} + form={ + this.setState({ value })} + /> + } + /> + ) + } +} diff --git a/web/src/app/schedules/ScheduleRuleDeleteDialog.js b/web/src/app/schedules/ScheduleRuleDeleteDialog.js new file mode 100644 index 0000000000..5f6755abb3 --- /dev/null +++ b/web/src/app/schedules/ScheduleRuleDeleteDialog.js @@ -0,0 +1,93 @@ +import React from 'react' +import p from 'prop-types' +import { Mutation } from 'react-apollo' +import FormDialog from '../dialogs/FormDialog' +import { nonFieldErrors } from '../util/errutil' +import gql from 'graphql-tag' +import { 
graphql2Client } from '../apollo' +import { startCase } from 'lodash-es' +import Query from '../util/Query' + +const query = gql` + query($id: ID!, $tgt: TargetInput!) { + schedule(id: $id) { + id + target(input: $tgt) { + target { + id + name + type + } + } + } + } +` + +const mutation = gql` + mutation($input: ScheduleTargetInput!) { + updateScheduleTarget(input: $input) + } +` + +export default class ScheduleRuleDeleteDialog extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + target: p.shape({ id: p.string.isRequired, type: p.string.isRequired }) + .isRequired, + onClose: p.func, + } + + render() { + return ( + this.renderMutation(data.schedule.target)} + /> + ) + } + renderMutation(data) { + return ( + + {(commit, status) => this.renderDialog(data, commit, status)} + + ) + } + + renderDialog(data, commit, status) { + return ( + { + commit({ + variables: { + input: { + target: this.props.target, + scheduleID: this.props.scheduleID, + + rules: [], + }, + }, + }) + }} + /> + ) + } +} diff --git a/web/src/app/schedules/ScheduleRuleEditDialog.js b/web/src/app/schedules/ScheduleRuleEditDialog.js new file mode 100644 index 0000000000..b8edde8035 --- /dev/null +++ b/web/src/app/schedules/ScheduleRuleEditDialog.js @@ -0,0 +1,123 @@ +import React from 'react' +import p from 'prop-types' +import { Mutation } from 'react-apollo' +import FormDialog from '../dialogs/FormDialog' +import ScheduleRuleForm from './ScheduleRuleForm' +import { fieldErrors, nonFieldErrors } from '../util/errutil' +import gql from 'graphql-tag' +import { graphql2Client } from '../apollo' +import { startCase, pick } from 'lodash-es' +import Query from '../util/Query' + +const query = gql` + query($id: ID!, $tgt: TargetInput!) { + schedule(id: $id) { + id + target(input: $tgt) { + rules { + start + end + weekdayFilter + } + } + } + } +` + +const mutation = gql` + mutation($input: ScheduleTargetInput!) 
{ + updateScheduleTarget(input: $input) + } +` + +export default class ScheduleRuleEditDialog extends React.Component { + static propTypes = { + scheduleID: p.string.isRequired, + target: p.shape({ + type: p.oneOf(['rotation', 'user']).isRequired, + id: p.string.isRequired, + }).isRequired, + onClose: p.func, + } + + state = { + value: null, + } + + shouldComponentUpdate(nextProps, nextState) { + if (this.state !== nextState) return true + + return false + } + + render() { + return ( + this.renderMutation(data.schedule.target)} + /> + ) + } + + renderMutation(data) { + return ( + + {(commit, status) => this.renderDialog(data, commit, status)} + + ) + } + + renderDialog(data, commit, status) { + const defaults = { + targetID: this.props.target.id, + rules: data.rules.map(r => + pick(r, ['id', 'start', 'end', 'weekdayFilter']), + ), + } + return ( + { + if (!this.state.value) { + // no changes + this.props.onClose() + return + } + commit({ + variables: { + input: { + target: this.props.target, + scheduleID: this.props.scheduleID, + + rules: this.state.value.rules, + }, + }, + }) + }} + form={ + this.setState({ value })} + /> + } + /> + ) + } +} diff --git a/web/src/app/schedules/ScheduleRuleForm.js b/web/src/app/schedules/ScheduleRuleForm.js new file mode 100644 index 0000000000..fda85519f9 --- /dev/null +++ b/web/src/app/schedules/ScheduleRuleForm.js @@ -0,0 +1,345 @@ +import React from 'react' +import p from 'prop-types' +import { FormContainer, FormField } from '../forms' +import { + Grid, + Checkbox, + Table, + TableHead, + TableRow, + TableCell, + Hidden, + IconButton, + TableBody, + withStyles, + MenuItem, + TextField, + Typography, +} from '@material-ui/core' +import { UserSelect, RotationSelect } from '../selection' +import { startCase } from 'lodash-es' +import { Add, Trash } from '../icons' +import { TimePicker } from 'material-ui-pickers' +import { ScheduleTZFilter } from './ScheduleTZFilter' +import { oneOfShape } from '../util/propTypes' +import Query 
from '../util/Query' +import gql from 'graphql-tag' +import { connect } from 'react-redux' +import { urlParamSelector } from '../selectors' +import { parseClock, formatClock, mapRuleTZ } from './util' + +const days = [ + 'Sunday', + 'Monday', + 'Tuesday', + 'Wednesday', + 'Thursday', + 'Friday', + 'Saturday', +] + +const renderDaysValue = value => { + const parts = [] + let start = '' + let last = '' + let lastIdx = -1 + + const flush = () => { + if (lastIdx === -1) return + if (start === last) { + parts.push(start) + } else { + parts.push(start + '—' + last) + } + lastIdx = -1 + } + + days.forEach((day, idx) => { + const enabled = value.includes(day) + if (lastIdx === -1 && enabled) { + start = day.substr(0, 3) + last = start + lastIdx = idx + } else if (enabled) { + lastIdx = idx + last = day.substr(0, 3) + } else if (lastIdx !== -1) { + flush() + } + }) + + flush() + return parts.join(',') +} + +const styles = theme => { + return { + noPadding: { + padding: 0, + }, + dayFilter: { + padding: 0, + paddingRight: '1em', + }, + startEnd: { + padding: 0, + minWidth: '6em', + paddingRight: '1em', + }, + tzNote: { + display: 'flex', + alignItems: 'center', + }, + } +} + +const query = gql` + query($id: ID!) 
{ + schedule(id: $id) { + id + timeZone + } + } +` + +@withStyles(styles) +@connect(state => ({ zone: urlParamSelector(state)('tz', 'local') })) +export default class ScheduleRuleForm extends React.PureComponent { + static propTypes = { + targetType: p.oneOf(['rotation', 'user']).isRequired, + targetDisabled: p.bool, + + // one of scheduleID or scheduleTimeZone must be specified + tz: oneOfShape({ + scheduleID: p.string, + scheduleTimeZone: p.string, + }), + + value: p.shape({ + targetID: p.string.isRequired, + rules: p.arrayOf( + p.shape({ + start: p.string.isRequired, + end: p.string.isRequired, + + weekdayFilter: p.arrayOf(p.bool).isRequired, + }), + ).isRequired, + }).isRequired, + } + + render() { + if (this.props.scheduleTimeZone) { + return this.renderForm(this.props.scheduleTimeZone) + } + return ( + this.renderForm(data.schedule.timeZone)} + /> + ) + } + + renderForm(scheduleTZ) { + const { + zone: displayTZ, + targetDisabled, + targetType, + classes, + ...formProps + } = this.props + + let mapValue = value => value + let mapOnChangeValue = value => value + + if (displayTZ !== scheduleTZ) { + // handle zone conversions for rules, from schedule TZ to display TZ + mapValue = value => { + return { + ...value, + rules: value.rules.map(rule => + mapRuleTZ(scheduleTZ, displayTZ, rule), + ), + } + } + mapOnChangeValue = value => { + return { + ...value, + rules: value.rules.map(rule => + mapRuleTZ(displayTZ, scheduleTZ, rule), + ), + } + } + } + + return ( + + + + + Times and weekdays shown in{' '} + {displayTZ === 'local' ? 'local time' : displayTZ}. + + + + {/* Purposefully leaving out of form, as it's only used for converting display times. 
*/} + `Configure in ${tz}`} + scheduleID={this.props.scheduleID} + /> + + + + + + + + + Start + End + + {days.map(d => ( + + {d.slice(0, 3)} + + ))} + + + Days + + + + this.props.onChange({ + ...this.props.value, + rules: this.props.value.rules.concat({ + start: '00:00', + end: '00:00', + weekdayFilter: days.map(d => true), + }), + }) + } + > + + + + + + + {this.props.value.rules.map((r, idx) => + this.renderRuleField(idx), + )} + +
    +
    +
    +
    + ) + } + + renderRuleField(idx) { + const parseValue = value => parseClock(value, this.props.zone) + const classes = this.props.classes + return ( + + + + + + + + + {days.map((day, dayIdx) => ( + + + + ))} + + + + days.filter((d, idx) => value[idx])} + mapOnChangeValue={value => days.map(day => value.includes(day))} + > + {days.map(day => ( + + {day} + + ))} + + + + + {this.props.value.rules.length > 1 && ( + + this.props.onChange({ + ...this.props.value, + rules: this.props.value.rules.filter((r, i) => i !== idx), + }) + } + > + + + )} + + + ) + } +} diff --git a/web/src/app/schedules/ScheduleRuleList.js b/web/src/app/schedules/ScheduleRuleList.js new file mode 100644 index 0000000000..6c74a2c286 --- /dev/null +++ b/web/src/app/schedules/ScheduleRuleList.js @@ -0,0 +1,161 @@ +import React from 'react' +import p from 'prop-types' +import Query from '../util/Query' +import gql from 'graphql-tag' +import FlatList from '../lists/FlatList' +import { ScheduleTZFilter } from './ScheduleTZFilter' +import { Grid, Card } from '@material-ui/core' +import FilterContainer from '../util/FilterContainer' +import PageActions from '../util/PageActions' +import { connect } from 'react-redux' +import { urlParamSelector } from '../selectors' +import { startCase, sortBy } from 'lodash-es' +import { RotationAvatar, UserAvatar } from '../util/avatar' +import OtherActions from '../util/OtherActions' +import SpeedDial from '../util/SpeedDial' +import { AccountPlus, AccountMultiplePlus } from 'mdi-material-ui' +import ScheduleRuleCreateDialog from './ScheduleRuleCreateDialog' +import { ruleSummary } from './util' +import ScheduleRuleEditDialog from './ScheduleRuleEditDialog' +import ScheduleRuleDeleteDialog from './ScheduleRuleDeleteDialog' +import { resetURLParams } from '../actions' + +const query = gql` + query scheduleRules($id: ID!) 
{ + schedule(id: $id) { + id + timeZone + targets { + target { + id + type + name + } + rules { + id + start + end + weekdayFilter + } + } + } + } +` + +@connect( + state => ({ zone: urlParamSelector(state)('tz', 'local') }), + dispatch => ({ resetFilter: () => dispatch(resetURLParams('tz')) }), +) +export default class ScheduleRuleList extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + } + + state = { + editTarget: null, + deleteTarget: null, + createType: null, + } + + render() { + return ( + + this.renderList(data.schedule.targets, data.schedule.timeZone) + } + /> + ) + } + getHeaderNote() { + const zone = this.props.zone + return `Showing times in ${zone === 'local' ? 'local time' : zone}.` + } + renderList(targets, timeZone) { + const items = [] + + let lastType + sortBy(targets, ['target.type', 'target.name']).forEach(tgt => { + const { name, id, type } = tgt.target + if (type !== lastType) { + items.push({ subHeader: startCase(type + 's') }) + lastType = type + } + + items.push({ + title: name, + url: (type === 'rotation' ? '/rotations/' : '/users/') + id, + subText: ruleSummary(tgt.rules, timeZone, this.props.zone), + icon: + type === 'rotation' ? 
: , + action: ( + this.setState({ editTarget: { type, id } }), + }, + { + label: 'Delete', + onClick: () => this.setState({ deleteTarget: { type, id } }), + }, + ]} + /> + ), + }) + }) + + return ( + + + this.props.resetFilter()}> + + + + + this.setState({ createType: 'rotation' }), + icon: , + }, + { + label: 'Add User', + onClick: () => this.setState({ createType: 'user' }), + icon: , + }, + ]} + /> + + + + + + {this.state.createType && ( + this.setState({ createType: null })} + /> + )} + {this.state.editTarget && ( + this.setState({ editTarget: null })} + /> + )} + {this.state.deleteTarget && ( + this.setState({ deleteTarget: null })} + /> + )} + + ) + } +} diff --git a/web/src/app/schedules/ScheduleShiftList.js b/web/src/app/schedules/ScheduleShiftList.js new file mode 100644 index 0000000000..07eaa8b3c5 --- /dev/null +++ b/web/src/app/schedules/ScheduleShiftList.js @@ -0,0 +1,370 @@ +import React from 'react' +import { DateTime, Duration, Interval } from 'luxon' +import p from 'prop-types' +import FlatList from '../lists/FlatList' +import gql from 'graphql-tag' +import { withQuery } from '../util/Query' +import { connect } from 'react-redux' +import { urlParamSelector } from '../selectors' +import { relativeDate } from '../util/timeFormat' +import { + Card, + Grid, + FormControlLabel, + Switch, + InputAdornment, + IconButton, + TextField, + MenuItem, +} from '@material-ui/core' +import { DateRange } from '@material-ui/icons' +import { UserAvatar } from '../util/avatar' +import PageActions from '../util/PageActions' +import FilterContainer from '../util/FilterContainer' +import { UserSelect } from '../selection' +import { setURLParam, resetURLParams } from '../actions' +import { DatePicker } from 'material-ui-pickers' +import { ScheduleTZFilter } from './ScheduleTZFilter' +import ScheduleNewOverrideFAB from './ScheduleNewOverrideFAB' +import ScheduleOverrideCreateDialog from './ScheduleOverrideCreateDialog' + +// query name is important, as it's used for 
refetching data after mutations +const query = gql` + query scheduleShifts($id: ID!, $start: ISOTimestamp!, $end: ISOTimestamp!) { + schedule(id: $id) { + id + shifts(start: $start, end: $end) { + user { + id + name + } + start + end + truncated + } + } + } +` + +const durString = dur => { + if (dur.months) { + return `${dur.months} month${dur.months > 1 ? 's' : ''}` + } else if (dur.days % 7 === 0) { + const weeks = dur.days / 7 + return `${weeks} week${weeks > 1 ? 's' : ''}` + } else { + return `${dur.days} day${dur.days > 1 ? 's' : ''}` + } +} + +const mapQueryToProps = ({ data }) => { + return { + shifts: data.schedule.shifts.map(s => ({ + ...s, + userID: s.user.id, + userName: s.user.name, + })), + } +} +const mapPropsToQueryProps = ({ scheduleID, start, end }) => ({ + variables: { + id: scheduleID, + start, + end, + }, +}) + +const mapStateToProps = state => { + const duration = urlParamSelector(state)('duration', 'P14D') + const zone = urlParamSelector(state)('tz', 'local') + let start = urlParamSelector(state)( + 'start', + DateTime.fromObject({ zone }) + .startOf('day') + .toISO(), + ) + + const activeOnly = urlParamSelector(state)('activeOnly', false) + if (activeOnly) { + start = DateTime.fromObject({ zone }).toISO() + } + + let end = DateTime.fromISO(start, { zone }) + .plus(Duration.fromISO(duration)) + .toISO() + + return { + start, + end, + userFilter: urlParamSelector(state)('userFilter', []), + activeOnly, + duration, + zone, + } +} + +const mapDispatchToProps = dispatch => { + return { + setUserFilter: value => dispatch(setURLParam('userFilter', value)), + setActiveOnly: value => dispatch(setURLParam('activeOnly', value)), + setDuration: value => dispatch(setURLParam('duration', value, 'P14D')), + setStart: value => dispatch(setURLParam('start', value)), + resetFilter: () => + dispatch( + resetURLParams('userFilter', 'start', 'activeOnly', 'tz', 'duration'), + ), + } +} + +@connect( + mapStateToProps, + mapDispatchToProps, +) +@withQuery(query, 
mapQueryToProps, mapPropsToQueryProps) +export default class ScheduleShiftList extends React.PureComponent { + static propTypes = { + scheduleID: p.string.isRequired, + + // provided by connect + start: p.string.isRequired, + end: p.string.isRequired, + zone: p.string.isRequired, + + // provided by withQuery + shifts: p.arrayOf( + p.shape({ + start: p.string.isRequired, + end: p.string.isRequired, + userID: p.string.isRequired, + userName: p.string.isRequired, + truncated: p.bool, + }), + ), + } + + static defaultProps = { + shifts: [], + } + + state = { + create: null, + specifyDuration: false, + } + + items() { + const { + shifts: _shifts, + start, + end, + userFilter, + activeOnly, + zone, + } = this.props + + let shifts = _shifts + .filter(s => !userFilter.length || userFilter.includes(s.userID)) + .map(s => ({ + ...s, + start: DateTime.fromISO(s.start, { zone }), + end: DateTime.fromISO(s.end, { zone }), + interval: Interval.fromDateTimes( + DateTime.fromISO(s.start, { zone }), + DateTime.fromISO(s.end, { zone }), + ), + })) + + if (activeOnly) { + const now = DateTime.fromObject({ zone }) + shifts = shifts.filter(s => s.interval.contains(now)) + } + + if (!shifts.length) return [] + + const displaySpan = Interval.fromDateTimes( + DateTime.fromISO(start, { zone }).startOf('day'), + DateTime.fromISO(end, { zone }).startOf('day'), + ) + + const result = [] + displaySpan.splitBy({ days: 1 }).forEach(day => { + const dayShifts = shifts.filter(s => day.overlaps(s.interval)) + if (!dayShifts.length) return + result.push({ + subHeader: relativeDate(day.start), + }) + dayShifts.forEach(s => { + let shiftDetails = '' + const startTime = s.start.toLocaleString({ + hour: 'numeric', + minute: 'numeric', + }) + const endTime = s.end.toLocaleString({ + hour: 'numeric', + minute: 'numeric', + }) + if (s.interval.engulfs(day)) { + // shift (s.interval) spans all day + shiftDetails = 'All day' + } else if (day.engulfs(s.interval)) { + // shift is inside the day + shiftDetails 
= `From ${startTime} to ${endTime}` + } else if (day.contains(s.end)) { + shiftDetails = `Active until${ + s.truncated ? ' at least' : '' + } ${endTime}` + } else { + // shift starts and continues on for the rest of the day + shiftDetails = `Active after ${startTime}` + } + result.push({ + title: s.userName, + subText: shiftDetails, + icon: , + }) + }) + }) + + return result + } + + renderDurationSelector() { + // Dropdown options (in ISO_8601 format) + // https://en.wikipedia.org/wiki/ISO_8601#Durations + const quickOptions = ['P1D', 'P3D', 'P7D', 'P14D', 'P1M'] + const clamp = (min, max, value) => Math.min(max, Math.max(min, value)) + + if ( + quickOptions.includes(this.props.duration) && + !this.state.specifyDuration + ) { + return ( + { + e.target.value === 'SPECIFY' + ? this.setState({ specifyDuration: true }) + : this.props.setDuration(e.target.value) + }} + > + {quickOptions.map(opt => ( + + {durString(Duration.fromISO(opt))} + + ))} + Specify... + + ) + } + return ( + { + this.props.setDuration( + Duration.fromObject({ + days: clamp(1, 30, parseInt(e.target.value, 10)), + }).toISO(), + ) + }} + /> + ) + } + + render() { + const zone = this.props.zone + const dur = Duration.fromISO(this.props.duration) + + const timeStr = durString(dur) + + const zoneText = zone === 'local' ? 'local time' : zone + const userText = this.props.userFilter.length ? ' for selected users' : '' + const note = this.props.activeOnly + ? 
`Showing currently active shifts${userText} in ${zoneText}.` + : `Showing shifts${userText} up to ${timeStr} from ${DateTime.fromISO( + this.props.start, + { + zone, + }, + ).toLocaleString()} in ${zoneText}.` + return ( + + + { + this.props.resetFilter() + this.setState({ specifyDuration: false }) + }} + > + + this.props.setActiveOnly(e.target.checked)} + value='activeOnly' + /> + } + label='Active shifts only' + /> + + + + + + this.props.setStart(e.toISO())} + showTodayButton + autoOk + InputProps={{ + endAdornment: ( + + + + + + ), + }} + /> + + + {this.renderDurationSelector()} + + + + + + this.setState({ create: variant })} + /> + + + + + {this.state.create && ( + this.setState({ create: null })} + /> + )} + + ) + } +} diff --git a/web/src/app/schedules/ScheduleTZFilter.js b/web/src/app/schedules/ScheduleTZFilter.js new file mode 100644 index 0000000000..ac64419bb6 --- /dev/null +++ b/web/src/app/schedules/ScheduleTZFilter.js @@ -0,0 +1,69 @@ +import React from 'react' +import p from 'prop-types' +import { connect } from 'react-redux' +import { urlParamSelector } from '../selectors' +import { setURLParam } from '../actions' +import Query from '../util/Query' +import gql from 'graphql-tag' +import { FormControlLabel, Switch } from '@material-ui/core' +import { oneOfShape } from '../util/propTypes' + +const tzQuery = gql` + query($id: ID!) 
{ + schedule(id: $id) { + id + timeZone + } + } +` + +@connect( + state => ({ zone: urlParamSelector(state)('tz', 'local') }), + dispatch => ({ + setZone: value => dispatch(setURLParam('tz', value, 'local')), + }), +) +export class ScheduleTZFilter extends React.PureComponent { + static propTypes = { + label: p.func, + + // one of scheduleID or scheduleTimeZone must be specified + _tz: oneOfShape({ + scheduleID: p.string, + scheduleTimeZone: p.string, + }), + + // provided by connect + zone: p.string, + setZone: p.func, + } + render() { + const { scheduleID, scheduleTimeZone } = this.props + if (scheduleTimeZone) return this.renderControl(scheduleTimeZone) + + return ( + this.renderControl(data.schedule.timeZone)} + /> + ) + } + + renderControl(tz) { + const { zone, label, setZone } = this.props + return ( + setZone(e.target.checked ? tz : 'local')} + value={tz} + /> + } + label={label ? label(tz) : `Show times in ${tz}`} + /> + ) + } +} diff --git a/web/src/app/schedules/util.js b/web/src/app/schedules/util.js new file mode 100644 index 0000000000..992f9695db --- /dev/null +++ b/web/src/app/schedules/util.js @@ -0,0 +1,161 @@ +import { DateTime } from 'luxon' + +export const days = [ + 'Sunday', + 'Monday', + 'Tuesday', + 'Wednesday', + 'Thursday', + 'Friday', + 'Saturday', +] + +// Shifts a weekdayFilter so that it matches the luxon day n +// +// Default is 7 (Sunday) +export function alignWeekdayFilter(n, filter) { + if (n === 7) return filter + + return filter.slice(7 - n).concat(filter.slice(0, 7 - n)) +} + +export function mapRuleTZ(fromTZ, toTZ, rule) { + const start = parseClock(rule.start, fromTZ).setZone(toTZ) + const end = parseClock(rule.end, fromTZ).setZone(toTZ) + return { + ...rule, + start: formatClock(start), + end: formatClock(end), + weekdayFilter: alignWeekdayFilter(start.weekday, rule.weekdayFilter), + } +} + +export function weekdaySummary(filter) { + const bin = filter.map(f => (f ? 
'1' : '0')).join('') + switch (bin) { + case '1000001': + return 'Weekends' + case '0000000': + return 'Never' + case '0111110': + return 'M—F' + case '0111111': + return 'M—F and Sat' + case '1111110': + return 'M—F and Sun' + case '1111111': + return 'Everyday' + } + + let d = [] + let chain = [] + const flush = () => { + if (chain.length < 3) { + chain.forEach(day => d.push(day.slice(0, 3))) + chain = [] + return + } + + d.push(chain[0].slice(0, 3) + '—' + chain[chain.length - 1].slice(0, 3)) + chain = [] + } + days.forEach((day, idx) => { + if (filter[idx]) { + chain.push(day) + return + } + flush() + }) + flush() + return d.join(', ') +} + +export function parseClock(s, zone) { + return DateTime.fromObject({ + hours: parseInt(s.split(':')[0], 10), + minutes: parseInt(s.split(':')[1], 10), + weekday: 7, // sunday + zone, + }) +} + +export function formatClock(dt) { + return `${dt.hour + .toString() + .padStart(2, '0')}:${dt.minute.toString().padStart(2, '0')}` +} + +export function ruleSummary(rules, scheduleZone, displayZone) { + const everyDay = r => !r.weekdayFilter.some(w => !w) && r.start === r.end + + rules = rules.filter(r => r.weekdayFilter.some(w => w)) // ignore disabled + if (rules.length === 0) return 'Never' + if (rules.some(everyDay)) return 'Always' + + const getTime = str => parseClock(str, scheduleZone).setZone(displayZone) + + return rules + .map(r => { + const start = getTime(r.start) + const weekdayFilter = alignWeekdayFilter(start.weekday, r.weekdayFilter) + return `${weekdaySummary(weekdayFilter)} from ${start.toLocaleString( + DateTime.TIME_SIMPLE, + )} to ${getTime(r.end).toLocaleString(DateTime.TIME_SIMPLE)} ` + }) + .join('\n') +} + +export function formatOverrideTime(_start, _end, zone) { + const start = + _start instanceof DateTime + ? _start.setZone(zone) + : DateTime.fromISO(_start, { zone }) + const end = + _end instanceof DateTime + ? 
_end.setZone(zone) + : DateTime.fromISO(_end, { zone }) + const sameDay = start.startOf('day').equals(end.startOf('day')) + return `${start.toLocaleString( + DateTime.DATETIME_MED, + )} to ${end.toLocaleString( + sameDay ? DateTime.TIME_SIMPLE : DateTime.DATETIME_MED, + )}` +} + +export function mapOverrideUserError(conflictingOverride, value, zone) { + if (!conflictingOverride) return [] + + const errs = [] + const isReplace = + conflictingOverride.addUser && conflictingOverride.removeUser + + const replaceMsg = add => + add + ? `replacing ${conflictingOverride.removeUser.name}` + : `replaced by ${conflictingOverride.addUser.name}` + + const time = formatOverrideTime( + conflictingOverride.start, + conflictingOverride.end, + zone, + ) + + const check = (valueField, errField) => { + if (!conflictingOverride[errField]) return + const verb = errField === 'addUser' ? 'added' : 'removed' + if (value[valueField] === conflictingOverride[errField].id) { + errs.push({ + field: valueField, + message: `Already ${ + isReplace ? 
replaceMsg(errField === 'addUser') : verb + } from ${time}`, + }) + } + } + check('addUserID', 'addUser') + check('addUserID', 'removeUser') + check('removeUserID', 'addUser') + check('removeUserID', 'removeUser') + + return errs +} diff --git a/web/src/app/schedules/util.test.js b/web/src/app/schedules/util.test.js new file mode 100644 index 0000000000..b8b0020d27 --- /dev/null +++ b/web/src/app/schedules/util.test.js @@ -0,0 +1,112 @@ +import { mapOverrideUserError, alignWeekdayFilter, mapRuleTZ } from './util' +import _ from 'lodash' + +const fromBin = f => f.split('').map(f => f === '1') + +describe('mapRuleTZ', () => { + const check = (rule, fromTZ, toTZ, expected) => { + rule.weekdayFilter = fromBin(rule.f) + expected.weekdayFilter = fromBin(expected.f) + + expect(mapRuleTZ(fromTZ, toTZ, _.omit(rule, 'f'))).toEqual( + _.omit(expected, 'f'), + ) + } + it('should not change same TZ', () => { + check({ start: '00:00', end: '00:00', f: '1000000' }, 'UTC', 'UTC', { + start: '00:00', + end: '00:00', + f: '1000000', + }) + }) + + it('should map across days, and back', () => { + check({ start: '00:00', end: '00:00', f: '1000000' }, 'UTC', 'UTC-6', { + start: '18:00', + end: '18:00', + f: '0000001', + }) + + check( + { + start: '18:00', + end: '18:00', + f: '0000001', + }, + 'UTC-6', + 'UTC', + { start: '00:00', end: '00:00', f: '1000000' }, + ) + }) +}) + +describe('alignWeekdayFilter', () => { + const check = (input, n, expected) => + expect(alignWeekdayFilter(n, fromBin(input))).toEqual(fromBin(expected)) + + it('should leave aligned filters alone', () => { + check('1010101', 7, '1010101') + check('1010001', 7, '1010001') + check('1111111', 7, '1111111') + }) + + it('should align differences', () => { + // sunday becomes sat + check('1000000', 6, '0000001') + check('0010000', 6, '0100000') + // sunday becomes mon + check('1000000', 1, '0100000') + check('0010000', 1, '0001000') + }) +}) + +describe('mapOverrideUserError', () => { + const data = { + start: 
'2019-01-02T20:33:10.363Z', + end: '2019-01-02T21:33:10.363Z', + } + const timeStr = 'Jan 2, 2019, 2:33 PM to 3:33 PM' + const zone = 'America/Chicago' + + const add = { ...data, addUser: { id: 'foo', name: 'bob' } } + const remove = { ...data, removeUser: { id: 'bar', name: 'ben' } } + const replace = { ...add, ...remove } + + const check = (override, value, errs) => + expect(mapOverrideUserError(override, value, zone)).toEqual(errs) + + it('should generate proper error messages', () => { + check(add, { addUserID: 'foo' }, [ + { + field: 'addUserID', + message: 'Already added from ' + timeStr, + }, + ]) + + check(replace, { addUserID: 'bar' }, [ + { + field: 'addUserID', + message: 'Already replaced by bob from ' + timeStr, + }, + ]) + check(replace, { addUserID: 'foo' }, [ + { + field: 'addUserID', + message: 'Already replacing ben from ' + timeStr, + }, + ]) + check(remove, { addUserID: 'bar' }, [ + { + field: 'addUserID', + message: 'Already removed from ' + timeStr, + }, + ]) + + check(add, { removeUserID: 'foo' }, [ + { + field: 'removeUserID', + message: 'Already added from ' + timeStr, + }, + ]) + }) +}) diff --git a/web/src/app/selection/EscalationPolicySelect.js b/web/src/app/selection/EscalationPolicySelect.js new file mode 100644 index 0000000000..1f81a04f37 --- /dev/null +++ b/web/src/app/selection/EscalationPolicySelect.js @@ -0,0 +1,29 @@ +import React from 'react' + +import gql from 'graphql-tag' +import QuerySelect from './QuerySelect' + +const query = gql` + query($input: EscalationPolicySearchOptions) { + escalationPolicies(input: $input) { + nodes { + id + name + } + } + } +` + +const valueQuery = gql` + query($id: ID!) 
{ + escalationPolicy(id: $id) { + id + name + } + } +` +export class EscalationPolicySelect extends React.PureComponent { + render() { + return + } +} diff --git a/web/src/app/selection/LabelKeySelect.js b/web/src/app/selection/LabelKeySelect.js new file mode 100644 index 0000000000..3457d51ca1 --- /dev/null +++ b/web/src/app/selection/LabelKeySelect.js @@ -0,0 +1,30 @@ +import React from 'react' +import QuerySelect from './QuerySelect' +import gql from 'graphql-tag' + +const query = gql` + query($input: LabelSearchOptions) { + labels(input: $input) { + nodes { + key + } + } + } +` + +export class LabelKeySelect extends React.PureComponent { + render() { + return ( + ({ + label: node.key, + value: node.key, + })} + query={query} + /> + ) + } +} diff --git a/web/src/app/selection/MaterialSelect.js b/web/src/app/selection/MaterialSelect.js new file mode 100644 index 0000000000..3c3603472d --- /dev/null +++ b/web/src/app/selection/MaterialSelect.js @@ -0,0 +1,98 @@ +import React, { Component } from 'react' +import { PropTypes as p } from 'prop-types' +import { withStyles } from '@material-ui/core/styles' +import Select from 'react-select' +import { components, styles } from './MaterialSelectComponents' + +const valueShape = p.shape({ + label: p.string.isRequired, + value: p.string.isRequired, + icon: p.element, +}) + +// valueCheck ensures the type is `arrayOf(p.string)` if `multiple` is set +// and `p.string` otherwise. 
+function valueCheck(props, ...args) { + if (props.multiple) return p.arrayOf(valueShape).isRequired(props, ...args) + return valueShape(props, ...args) +} + +@withStyles(styles, { withTheme: true }) +export default class MaterialSelect extends Component { + static propTypes = { + multiple: p.bool, // allow selecting multiple values + required: p.bool, + onChange: p.func.isRequired, + onInputChange: p.func, + options: p.arrayOf(valueShape).isRequired, + placeholder: p.string, + value: valueCheck, + } + + static defaultProps = { + options: [], + } + + render() { + const { + multiple, + noClear, + theme, + value, + onChange, + classes, + disabled, + required, + + label, + name, + placeholder, + InputLabelProps, + + ...props + } = this.props + + const selectStyles = { + input: base => ({ + ...base, + color: theme.palette.text.primary, + }), + } + + let textFieldProps = { + required, + label, + placeholder, + InputLabelProps, + value: value ? (multiple ? value.join(',') : value.value) : '', + } + + return ( +
    +