Compare commits

...

69 Commits

Author SHA1 Message Date
mediocregopher cefe5633e3 Remove reference to Tutorial 4 from README 4 years ago
mediocregopher ff041c74aa update ROADMAP, project is publicized 4 years ago
mediocregopher 68b4e68177 changes from proofreading tutorials 4 years ago
mediocregopher a709a43696 Re-organize and flesh-out documentation, including writing the tutorials 4 years ago
mediocregopher a1579dcc96 Switch markdown generator in git-http-server 4 years ago
mediocregopher 3cd0b03202 add --descr flag to commit credential command 4 years ago
mediocregopher 6176b9ffbc add change_description field to credential commits 4 years ago
mediocregopher f085f13fa8 Remove Payload.MessageHead error return and simplify implementations 4 years ago
mediocregopher f52ea2a708 Rework how FilterFilesChanged works 4 years ago
mediocregopher 38d396e90c Fix a bug which prevented force pushing to a previous commit in a branch 4 years ago
mediocregopher 23fa9da972 properly check for gpg not returning a matching pgp key 4 years ago
mediocregopher 4389da48e4 Fix a bug in typeobj when a type field's name is the same as one of its inner... 4 years ago
mediocregopher 3ec0908b32 Remove extra lines from lists in README and SPEC, because apparently it matters 4 years ago
mediocregopher 422c444a50 change pre-receive hook from flag to sub-command 4 years ago
mediocregopher b01fe1524a Completely refactor naming of everything, in light of new SPEC 4 years ago
mediocregopher 351048e9aa Completely rewrite SPEC 4 years ago
mediocregopher c2c7fdf691 Support non-fastforward commits 4 years ago
mediocregopher 84399603cf refactor how file changes and hashes are handled, and add tests for hashes 4 years ago
mediocregopher 1f892070bd some small convenience changes for tests 4 years ago
mediocregopher 3d89fe5fd9 Refactor how commit authorship is formatted 4 years ago
mediocregopher 43b564e711 refactor pgp types a bit to use the openpgp.Entity type 4 years ago
mediocregopher a01f2b1512 implement the ability for users without an account to still submit accredited co... 4 years ago
mediocregopher 03459d4b20 implement amend flag for change and comment commit types 4 years ago
mediocregopher e78efcce74 short hash support 4 years ago
mediocregopher ca4887bf07 force all commits to be ancestors of the root of main 4 years ago
mediocregopher 54af1ee510 add init command 4 years ago
mediocregopher d189d46667 refactor dehub binary's flags a bit 4 years ago
mediocregopher 921572d053 Documentation fixes for dehub-remote and git-http-server 4 years ago
mediocregopher d5f172875e Refactor all documentation 4 years ago
mediocregopher 4d56716fe8 Give the project a proper root, dehub.dev/src/dehub.git 4 years ago
mediocregopher e91f7b060e make docker-remote not force a .git extension 4 years ago
mediocregopher ee19d2c37e give git-http-server requests ability to specify specific revision 4 years ago
mediocregopher d6f5bf2e38 rename http-server to dehub-remote 4 years ago
mediocregopher a3b98cc4ef move Dockerfile into the root, make it build git-http-server as well 4 years ago
mediocregopher 2390197ae3 implement basic git-http-server 4 years ago
mediocregopher a8c7f92328 include commit hashes as part of credential commit when accrediting change commi... 4 years ago
mediocregopher aa1a4969f3 add ability to credential range of change commits 4 years ago
mediocregopher f3226d6171 Refactor combined commits a bit 4 years ago
mediocregopher 8cbdc03caa Implement commit combining 4 years ago
mediocregopher 1c2bc11fc3 refactor how commits are created, as well as how reference following is done 4 years ago
mediocregopher a47404b4a7 update SPEC in light of comment commit changes 4 years ago
mediocregopher eeb74ea22b Implement comment commits 4 years ago
mediocregopher b74186446e refactor commit interface to pass more information around 4 years ago
mediocregopher 1f422511d5 completely refactor accessctl (again) 4 years ago
mediocregopher 5ebb6597a8 normalize how git commits are interacted with, including changing VerifyComit -> VerifyCommits 4 years ago
mediocregopher 326de2afc6 Fully implement credential commits 4 years ago
mediocregopher 69e336ea5e rename commit_signature.go to commit_credential.go 4 years ago
mediocregopher a580018e1e Implement dcmd package and use it to simplify cmd/dehub's implementation significantly 4 years ago
mediocregopher aff3daab19 Modify how SignifierInterface is produced so it always sets AccountID on Credentials 4 years ago
mediocregopher c87baa5192 Implement credential commit object, but don't use it anywhere yet 4 years ago
mediocregopher 51af20fbc0 Add test which ensures all previous commits of this repo still verify 4 years ago
mediocregopher cf05b3a072 Refactor commit type and logic to account for future commit types 4 years ago
mediocregopher 9bfd012221 Add 'default' tag functionality to typeobj 4 years ago
mediocregopher 981fbb8327 Rename trunk branch to main branch 4 years ago
mediocregopher 1db3f2bd1e Add authorship to immediate milestone in ROADMAP 4 years ago
mediocregopher 0af9c04e33 Add ROADMAP 4 years ago
mediocregopher 76309b51cb Refactor access controls to support multiple branches 4 years ago
mediocregopher 2add3a2501 Have the dehub command check if there are any staged changes before committing 4 years ago
mediocregopher 98d8aed08a Add support for using EDITOR to construct commit messages. 4 years ago
mediocregopher 3344b8372d rename TrunkCommit to ChangeCommit, in accordance with the new SPEC 4 years ago
mediocregopher 492e7242c6 refactor SPEC, plus some small changes to INTRODUCTION 4 years ago
mediocregopher b565d26d1f actually set the default branch to trunk in the http-server, forgot to add the changes >_< 4 years ago
mediocregopher 14c37ae1ba set the default branch to trunk in the http-server 4 years ago
mediocregopher 85d79ff71a add INTRODUCTION and update http-server/README 4 years ago
mediocregopher 72f2a74415 specify the http root in the nginx conf file 4 years ago
mediocregopher a5bee27892 Change all references to 'master' into 'trunk' 4 years ago
mediocregopher 181802ba0e add initial implementation of the http-server 4 years ago
mediocregopher f0310bda75 add pre-receive hook command 4 years ago
mediocregopher 3f4d48ff89 add -bare option to dehub command, refactoring sub-command flags at the same time 4 years ago
  1. 8
      .dehub/config.yml
  2. 4
      .gitignore
  3. 43
      Dockerfile.dehub-remote
  4. 104
      README.md
  5. 195
      SPEC.md
  6. 170
      accessctl/access_control.go
  7. 205
      accessctl/access_control_test.go
  8. 130
      accessctl/condition.go
  9. 110
      accessctl/condition_test.go
  10. 124
      accessctl/filter.go
  11. 26
      accessctl/filter_logical.go
  12. 32
      accessctl/filter_logical_test.go
  13. 96
      accessctl/filter_pattern.go
  14. 199
      accessctl/filter_pattern_test.go
  15. 113
      accessctl/filter_sig.go
  16. 124
      accessctl/filter_sig_test.go
  17. 137
      accessctl/filter_test.go
  18. 75
      change_hash.go
  19. 39
      cmd/dehub-remote/README.md
  20. 44
      cmd/dehub-remote/nginx.conf
  21. 86
      cmd/dehub-remote/start.sh
  22. 273
      cmd/dehub/cmd_commit.go
  23. 66
      cmd/dehub/cmd_hook.go
  24. 27
      cmd/dehub/cmd_misc.go
  25. 75
      cmd/dehub/cmd_util.go
  26. 48
      cmd/dehub/cmd_verify.go
  27. 191
      cmd/dehub/dcmd/dcmd.go
  28. 140
      cmd/dehub/main.go
  29. 72
      cmd/dehub/tmp_file.go
  30. 27
      cmd/git-http-server/README.md
  31. 11
      cmd/git-http-server/go.mod
  32. 79
      cmd/git-http-server/go.sum
  33. 154
      cmd/git-http-server/main.go
  34. 351
      commit.go
  35. 170
      commit_test.go
  36. 49
      config.go
  37. 36
      diff.go
  38. 71
      docs/ROADMAP.md
  39. 501
      docs/SPEC.md
  40. 128
      docs/tut0.md
  41. 178
      docs/tut1.md
  42. 262
      docs/tut2.md
  43. 246
      docs/tut3.md
  44. 83
      fingerprint.go
  45. 237
      fingerprint_test.go
  46. 2
      go.mod
  47. 628
      payload.go
  48. 176
      payload_change.go
  49. 183
      payload_change_test.go
  50. 39
      payload_comment.go
  51. 82
      payload_credential.go
  52. 50
      payload_credential_test.go
  53. 452
      payload_test.go
  54. 326
      project.go
  55. 289
      project_test.go
  56. 104
      repo.go
  57. 118
      repo_test.go
  58. 74
      sigcred/credential.go
  59. 58
      sigcred/credential_test.go
  60. 307
      sigcred/pgp.go
  61. 18
      sigcred/pgp_test.go
  62. 101
      sigcred/signifier.go
  63. 127
      typeobj/typeobj.go
  64. 143
      typeobj/typeobj_test.go
  65. 4
      yamlutil/yamlutil.go

@ -4,11 +4,3 @@ accounts:
signifiers:
- type: pgp_public_key_file
path: ".dehub/mediocregopher.asc"
access_controls:
- pattern: "**"
condition:
type: signature
account_ids:
- mediocregopher
count: 100%

4
.gitignore vendored

@ -1 +1,3 @@
dehub
/dehub
/git-http-server
/cmd/git-http-server/git-http-server

@ -0,0 +1,43 @@
FROM golang:1.14
WORKDIR /go/src/dehub
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o /usr/bin/dehub ./cmd/dehub
WORKDIR /go/src/dehub/cmd/git-http-server
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o /usr/bin/git-http-server .
FROM debian:jessie
# Setup Container
VOLUME ["/repos"]
EXPOSE 80
# Setup APT
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
# Update, Install Prerequisites, Clean Up APT
RUN DEBIAN_FRONTEND=noninteractive apt-get -y update && \
apt-get -y install git wget nginx-full fcgiwrap && \
apt-get clean
# Setup Container User
RUN useradd -M -s /bin/false git --uid 1000
# Setup nginx fcgi services to run as user git, group git
RUN sed -i 's/FCGI_USER="www-data"/FCGI_USER="git"/g' /etc/init.d/fcgiwrap && \
sed -i 's/FCGI_GROUP="www-data"/FCGI_GROUP="git"/g' /etc/init.d/fcgiwrap && \
sed -i 's/FCGI_SOCKET_OWNER="www-data"/FCGI_SOCKET_OWNER="git"/g' /etc/init.d/fcgiwrap && \
sed -i 's/FCGI_SOCKET_GROUP="www-data"/FCGI_SOCKET_GROUP="git"/g' /etc/init.d/fcgiwrap
# Copy binaries
COPY --from=0 /usr/bin/dehub /usr/bin/dehub
COPY --from=0 /usr/bin/git-http-server /usr/bin/git-http-server
# Create config files for container startup and nginx
COPY cmd/dehub-remote/nginx.conf /etc/nginx/nginx.conf
# Create start.sh
COPY cmd/dehub-remote/start.sh /start.sh
RUN chmod +x /start.sh
ENTRYPOINT ["/start.sh"]

@ -0,0 +1,104 @@
# dehub
dehub aims to provide all the features of a git hosting platform, but without
the hosting part. These features include:
**User management** - Authentication that commits come from the user they say
they do, and fine-grained control over which users can do what.
**Pull requests and issues** - Facilitation of discussion via comment commits,
and fine-grained (down to the file level) sign-off requirements.
**Tags and releases** - Mark releases in the repo itself, and provide
immutable and verifiable git tags so there's never any funny business. (Not yet
implemented)
**Plugins**: Extend all aspects of dehub functionality via executables managed
in the repo itself (in the same style as git hooks). (Not yet implemented)
## Key Concepts
To implement these features, dehub combines two key concepts:
First, repo configuration is defined in the repo itself. A file called
`.dehub/config.yml` contains all information related to user accounts, their pgp
keys, branch and file level access controls, and more. Every commit must adhere
to the configuration of its parent in order to be considered _verifiable_. The
configuration file is committed to the repo like any other file would be, and so
is even able to define the access controls on itself.
Second, the commit message of every dehub commit contains a YAML encoded
payload, which allows dehub to extend git and provide multiple commit types,
each with its own capabilities and restrictions. Some example dehub commit types
are `change` commits, `comment` commits, and `credential` commits.
## Infrastructure (or lack thereof)
Because a dehub project is entirely housed within a traditional git project,
which is merely a collection of files, any existing git or network filesystem
infrastructure can be used to host any dehub project:
* The most barebones [git
daemon](https://git-scm.com/book/en/v2/Git-on-the-Server-Git-Daemon) server
(with a simple pre-receive hook set up).
* A remote SSH endpoint.
* A mailing list (aka the old school way).
* Network file syncing utilities such as dropbox,
[syncthing](https://github.com/syncthing/syncthing), or
[NFS](https://en.wikipedia.org/wiki/Network_File_System).
* Existing git project hosts like GitHub, Bitbucket, or Keybase.
* Decentralized filesystems such as IPFS. (Not yet implemented)
## Getting Started {#getting-started}
The dehub project itself can be found by cloning
`https://dehub.dev/src/dehub.git`.
Installation of the dehub tool is currently done via the `go get` command:
```
go get -u -v dehub.dev/src/dehub.git/cmd/dehub
```
This will install the binary to your `$GOBIN` path, which you'll want to put in
your `$PATH`. Run `go env` if you're not sure where your `$GOBIN` is.
Once installed, running `dehub -h` should show you the help output of the
command. You can continue on to the tutorials if you're not sure where to go
from here.
### Tutorials {#tutorials}
The following tutorials will guide you through the basic usage of dehub. Note
that dehub is in the infancy of its development, and so a certain level of
proficiency with git and PGP is required in order to follow these tutorials.
* [Tutorial 0: Say Hello!](/docs/tut0.html)
* [Tutorial 1: Create Your Own Project](/docs/tut1.html)
* [Tutorial 2: Access Controls](/docs/tut2.html)
* [Tutorial 3: Commit Sign-Off](/docs/tut3.html)
### Documentation
The [SPEC](/docs/SPEC.html) is the best place to see every possible nitty-gritty
detail of how dehub works. It attempts to be both human-readable and exhaustive
in its coverage.
### Other links
[ROADMAP](/docs/ROADMAP.html) documents upcoming features and other work
required on the project. If you're looking to contribute, this is a great place
to start.
[dehub-remote](/cmd/dehub-remote/) is a simple docker image which can be used to
host a remote dehub project over http(s). The endpoint will automatically verify
all pushed commits.
[git-http-server](/cmd/git-http-server/) is a small server which makes a git
repo's file tree available via http. It will automatically render markdown files
to html as well. git-http-server is used to render dehub's website.

@ -1,195 +0,0 @@
# .dehub
The `.dehub` directory contains all meta information related to
decentralized repository management and access control.
## config.yml
The `.dehub/config.yml` file takes the following structure:
```yaml
# accounts defines all accounts which are known to the repo.
accounts:
# Each account is an object with an id and at least one identifier. The id
# must be unique for each account.
- id: some_user_id
# signifiers describes different methods the account might use to
# identify itself. Generally, these will be different public keys which
# commits will be signed with. At least one is required.
signifiers:
- type: "pgp_public_key"
body: "FULL PGP PUBLIC KEY STRING"
- type: "pgp_public_key_file"
path: ".dehub/some_user_id.asc"
- type: "keybase"
user: "some_keybase_user_id"
# access_controls defines under what conditions different files in the repo may
# be modified. For each file modified in a commit, all access control patterns
# are applied sequentially until one matches, and the associated access control
# conditions are checked. A commit is only allowed if the conditions of all
# modified files are met.
access_controls:
# pattern is a glob pattern describing what files this access control
# applies to. Single star matches all characters except path separators,
# double star matches everything.
- pattern: ".dehub/**"
# signature conditions indicate that a commit must be signed by one or
# more accounts to be allowed.
condition:
type: signature
# account_ids lists all accounts whose signature will count towards
# meeting the condition
account_ids:
- some_user_id
# count describes how many signatures are required. It can be either a
# concrete integer (e.g. 2, meaning any 2 accounts listed by
# account_ids) or a percent.
count: 100%
# This catch-all pattern for the rest of the repo requires that changes to
# any files not under `.dehub/` are signed by at least one of the
# defined accounts.
- pattern: "**"
condition:
type: signature
any_account: true # indicates any account defined in accounts is valid
count: 1
```
# Master commit
All new commits being appended to the HEAD of the `master` branch are subject to
the following requirements:
* Must conform to all requirements defined by the `access_controls` section of
the `config.yml`, as found in the HEAD. If the commit is the initial commit of
the repo then it instead uses the `config.yml` found in itself.
* Must not be a merge commit (this may be amended later, but at present it
simplifies implementation).
* The commit message must conform to the format and semantics defined below.
## Master Commit Message
The commit message for a commit being appended to the HEAD of the `master`
branch must conform to the following format: a single line (the message head)
giving a short description of the change, then two newlines, then a body which
is a yaml formatted string:
```yaml
This is the message head. It will be re-iterated within the yaml body.
# Now the yaml body begins
---
message: >
This is the message head. It will be re-iterated within the yaml body.
The rest of this field is for the message body, which corresponds to the
body of a normal commit message which might give a more long-form
explanation of the commit's changes.
Since the message is used in generating the signature it's necessary for it
to be encoded here fully formed, even though the message head is then
duplicated. Otherwise the exact bytes of the message would be ambiguous.
This situation is ugly, but not unbearable.
# See the Commit Signatures section below for how this is computed. The
# change_hash is always recomputed when verifying a commit, but is reproduced in
# the commit message itself for cases of forward compatibility, e.g. if the
# algorithm to compute the hash changes.
change_hash: XXX
# Credentials are the set of credentials which count towards requirements
# specified in the `access_controls` section of the `config.yml` file.
credentials:
- type: pgp_signature
account_id: some_user_id
pub_key_id: XXX
body: "base-64 signature body"
```
## Commit Signatures
When a commit is being signed by a signifier there is an expected data format
for the data to be signed. The format is a SHA-256 hash of the following pieces
of data concatenated together (the "change_hash"):
* A uvarint indicating the number of bytes in the commit message.
* The message.
* A uvarint indicating the number of files changed.
* For each file changed in the commit, ordered lexicographically-ascending based
on its full relative path within the repo, the following is then written:
* A uvarint indicating the length of the full relative path of the file
within the repo.
* The full relative path of the file within the repo.
* A little-endian uint32 representing the previous file mode of the file (or 0
if the file is being inserted).
* The 20-byte SHA1 hash of the previous version of the file's contents (or 20
0 bytes if the file is being inserted).
* A little-endian uint32 representing the new file mode of the file (or 0
if the file is being deleted).
* The 20-byte SHA1 hash of the new version of the file's contents (or 20
0 bytes if the file is being deleted).
The raw output from the SHA-256 is then prepended with a `0` byte (for forward
compatibility) and signed, and the result used as the signature body.
# Merge Requests
A merge request (MR) may be pushed to the repository as a new branch at any
time. All MR branch names follow the naming convention `DHMR-short-description`.
An MR branch has the following qualities:
* Meta commits (see sub-section) will only contain a commit message head/body,
but no file changes.
* The most recent substantial commit (as opposed to meta commits) should always
contain the full commit message head and body.
## Meta Commits
Meta commits are those which add information about the changes being requested,
but do not modify the changes themselves.
### Signature Commits
Signature commits sign the changes requested in order to count towards their
access control requirements. The message head of these are arbitrary, but the
body must be formatted as such:
```yaml
# This object matches the one found in the `credentials` section of the master
# commit message.
type: pgp_signature
account_id: some_user_id
pub_key_id: XXX
body: "base-64 signature body" # see Commit Signatures sub-section.
```
If a signature commit is added to a MR branch, and a substantial commit is
added after it, then that signature commit will no longer be valid, as it was
only signing the prior changeset. The signer will need to create and push a
new signature commit, if they agree with the new changes.
## Merging MRs
When an MR has accumulated enough meta commits to fulfill access control
requirements it may be coalesced into a single commit destined for the master
branch. See the Master Commit Message sub-section for details on how commits in
the master branch must be formatted.
# TODO
* access control patterns related to who may push to MR branches, and what types
of commits they can push.

@ -1,53 +1,155 @@
// Package accessctl implements functionality related to allowing or denying
// actions in a repo based on who is taking what actions.
package accessctl
import (
"errors"
"fmt"
"github.com/bmatcuk/doublestar"
"dehub.dev/src/dehub.git/sigcred"
yaml "gopkg.in/yaml.v2"
)
// DefaultAccessControlsStr is the encoded form of the default access control
// set which is applied to all CommitRequests if no user-supplied ones match.
//
// The effect of these AccessControls is to allow all commit types on any branch
// (with the exception of the main branch, which only allows change commits), as
// long as the commit has one signature from a configured account.
var DefaultAccessControlsStr = `
- action: allow
filters:
- type: not
filter:
type: branch
pattern: main
- type: signature
any_account: true
count: 1
- action: deny
filters:
- type: commit_attributes
non_fast_forward: true
- action: allow
filters:
- type: branch
pattern: main
- type: payload_type
payload_type: change
- type: signature
any_account: true
count: 1
- action: deny
`
// DefaultAccessControls is the decoded form of DefaultAccessControlsStr.
var DefaultAccessControls = func() []AccessControl {
var acl []AccessControl
if err := yaml.Unmarshal([]byte(DefaultAccessControlsStr), &acl); err != nil {
panic(err)
}
return acl
}()
// CommitRequest is used to describe a set of interactions which are being
// requested to be performed.
type CommitRequest struct {
// Type describes what type of commit is being requested. Possibilities are
// determined by the requester.
Type string
// Branch is the name of the branch the interactions are being attempted on.
// It is required.
Branch string
// Credentials are the credentials attached to the commit.
Credentials []sigcred.CredentialUnion
// FilesChanged is the set of file paths (relative to the repo root) which
// have been modified in some way.
FilesChanged []string
// NonFastForward should be set to true if the branch HEAD and this commit
// are not directly related (i.e. neither is a direct ancestor of the
// other).
NonFastForward bool
}
// Action describes what action an AccessControl should perform
// when given a CommitRequest.
type Action string
// Enumerates possible Action values
const (
ActionAllow Action = "allow"
ActionDeny Action = "deny"
// ActionNext is used internally when a request does not match an
// AccessControl's filters. It _could_ be used in the Config as well, but it
// would be pretty pointless to do so, so we don't talk about it.
ActionNext Action = "next"
)
// AccessControl represents an access control object being defined in the
// Config.
// AccessControl describes a set of Filters, and the Actions which should be
// taken on a CommitRequest if those Filters all match on the CommitRequest.
type AccessControl struct {
Pattern string `yaml:"pattern"`
Condition Condition `yaml:"condition"`
Action Action `yaml:"action"`
Filters []FilterUnion `yaml:"filters"`
}
// ErrNoApplicableAccessControls is returned from ApplicableAccessControls when
// a changed path has no applicable AccessControls which match it.
type ErrNoApplicableAccessControls struct {
Path string
// ActionForCommit returns what Action this AccessControl says to take for a
// given CommitRequest. It may return ActionNext if the request is not matched
// by the AccessControl's Filters.
func (ac AccessControl) ActionForCommit(req CommitRequest) (Action, error) {
for _, filterUn := range ac.Filters {
if err := filterUn.Filter().MatchCommit(req); errors.As(err, new(ErrFilterNoMatch)) {
return ActionNext, nil
} else if err != nil {
return "", fmt.Errorf("matching commit using filter of type %q: %w", filterUn.Type(), err)
}
}
return ac.Action, nil
}
func (err ErrNoApplicableAccessControls) Error() string {
return fmt.Sprintf("no AccessControls which apply to changed file %q", err.Path)
// ErrCommitRequestDenied is returned from AssertCanCommit when a particular
// AccessControl has explicitly disallowed the CommitRequest.
type ErrCommitRequestDenied struct {
By AccessControl
}
// ApplicableAccessControls returns a subset of the given AccessControls which
// are applicable to the given file paths (ie those whose Conditions must be met
// in order for the changes to go through.
func ApplicableAccessControls(accessControls []AccessControl, filesChanged []string) ([]AccessControl, error) {
applicableSet := map[AccessControl]struct{}{}
for _, path := range filesChanged {
var any bool
for _, ac := range accessControls {
if ok, err := doublestar.PathMatch(ac.Pattern, path); err != nil {
return nil, fmt.Errorf("error matching path %q to patterrn %q: %w",
path, ac.Pattern, err)
} else if ok {
applicableSet[ac] = struct{}{}
any = true
break
}
func (e ErrCommitRequestDenied) Error() string {
acB, err := yaml.Marshal(e.By)
if err != nil {
panic(err)
}
return fmt.Sprintf("commit matched and denied by this access control:\n%s", string(acB))
}
// AssertCanCommit asserts that the given CommitRequest is allowed by the given
// AccessControls.
func AssertCanCommit(acl []AccessControl, req CommitRequest) error {
acl = append(acl, DefaultAccessControls...)
for _, ac := range acl {
action, err := ac.ActionForCommit(req)
if err != nil {
return err
}
if !any {
return nil, ErrNoApplicableAccessControls{Path: path}
switch action {
case ActionNext:
continue
case ActionAllow:
return nil
case ActionDeny:
return ErrCommitRequestDenied{By: ac}
default:
return fmt.Errorf("invalid action %q", action)
}
}
applicable := make([]AccessControl, 0, len(applicableSet))
for ac := range applicableSet {
applicable = append(applicable, ac)
}
return applicable, nil
panic("should not be able to get here")
}

@ -2,116 +2,143 @@ package accessctl
import (
"errors"
"reflect"
"sort"
"testing"
"dehub.dev/src/dehub.git/sigcred"
)
func TestApplicableAccessControls(t *testing.T) {
func TestAssertCanCommit(t *testing.T) {
tests := []struct {
descr string
patterns, filesChanged []string
exp []string
expErrPath string
descr string
acl []AccessControl
req CommitRequest
allowed bool
}{
{
descr: "empty input empty output",
},
{
descr: "empty patterns",
filesChanged: []string{"foo", "bar"},
expErrPath: "foo",
},
{
descr: "empty filesChanged",
patterns: []string{"patternA", "patternB"},
descr: "first allows",
acl: []AccessControl{
{
Action: ActionAllow,
Filters: []FilterUnion{{
PayloadType: &FilterPayloadType{Type: "foo"},
}},
},
{
Action: ActionDeny,
Filters: []FilterUnion{{
PayloadType: &FilterPayloadType{Type: "foo"},
}},
},
},
req: CommitRequest{Type: "foo"},
allowed: true,
},
{
descr: "no applicable files",
filesChanged: []string{"foo"},
patterns: []string{"bar"},
expErrPath: "foo",
descr: "first denies",
acl: []AccessControl{
{
Action: ActionDeny,
Filters: []FilterUnion{{
PayloadType: &FilterPayloadType{Type: "foo"},
}},
},
{
Action: ActionAllow,
Filters: []FilterUnion{{
PayloadType: &FilterPayloadType{Type: "foo"},
}},
},
},
req: CommitRequest{Type: "foo"},
allowed: false,
},
{
descr: "all applicable files",
filesChanged: []string{"foo", "bar"},
patterns: []string{"**"},
exp: []string{"**"},
descr: "second allows",
acl: []AccessControl{
{
Action: ActionDeny,
Filters: []FilterUnion{{
PayloadType: &FilterPayloadType{Type: "bar"},
}},
},
{
Action: ActionAllow,
Filters: []FilterUnion{{
PayloadType: &FilterPayloadType{Type: "foo"},
}},
},
},
req: CommitRequest{Type: "foo"},
allowed: true,
},
{
descr: "pattern precedent",
filesChanged: []string{"foo"},
patterns: []string{"foo", "**"},
exp: []string{"foo"},
descr: "second denies",
acl: []AccessControl{
{
Action: ActionDeny,
Filters: []FilterUnion{{
PayloadType: &FilterPayloadType{Type: "bar"},
}},
},
{
Action: ActionDeny,
Filters: []FilterUnion{{
PayloadType: &FilterPayloadType{Type: "foo"},
}},
},
},
req: CommitRequest{Type: "foo"},
allowed: false,
},
{
descr: "pattern precedent inv",
filesChanged: []string{"foo"},
patterns: []string{"**", "foo"},
exp: []string{"**"},
descr: "default allows",
acl: []AccessControl{
{
Action: ActionDeny,
Filters: []FilterUnion{{
PayloadType: &FilterPayloadType{Type: "bar"},
}},
},
},
req: CommitRequest{
Branch: "not_main",
Type: "foo",
Credentials: []sigcred.CredentialUnion{{
PGPSignature: new(sigcred.CredentialPGPSignature),
AccountID: "a",
}},
},
allowed: true,
},
{
descr: "individual matches",
filesChanged: []string{"foo", "bar/baz"},
patterns: []string{"foo", "bar/baz"},
exp: []string{"foo", "bar/baz"},
},
{
descr: "star match dir",
filesChanged: []string{"foo", "bar/baz"},
patterns: []string{"foo", "bar/*"},
exp: []string{"foo", "bar/*"},
},
{
descr: "star not match dir",
filesChanged: []string{"foo", "bar/baz/biz"},
patterns: []string{"foo", "bar/*"},
expErrPath: "bar/baz/biz",
},
{
descr: "doublestar match dir",
filesChanged: []string{"foo", "bar/bar", "bar/baz/biz"},
patterns: []string{"foo", "bar/**"},
exp: []string{"foo", "bar/**"},
descr: "default denies",
acl: []AccessControl{
{
Action: ActionDeny,
Filters: []FilterUnion{{
PayloadType: &FilterPayloadType{Type: "bar"},
}},
},
},
req: CommitRequest{
Branch: "main",
Type: "foo",
Credentials: []sigcred.CredentialUnion{{
PGPSignature: new(sigcred.CredentialPGPSignature),
AccountID: "a",
}},
},
allowed: false,
},
}
for _, test := range tests {
t.Run(test.descr, func(t *testing.T) {
accessControls := make([]AccessControl, len(test.patterns))
for i := range test.patterns {
accessControls[i] = AccessControl{Pattern: test.patterns[i]}
}
out, err := ApplicableAccessControls(accessControls, test.filesChanged)
if err != nil && test.expErrPath == "" {
t.Fatalf("unexpected error: %v", err)
} else if test.expErrPath != "" {
if noAppErr := (ErrNoApplicableAccessControls{}); !errors.As(err, &noAppErr) {
t.Fatalf("expected ErrNoApplicableAccessControls for path %q, but got %v", test.expErrPath, err)
} else if test.expErrPath != noAppErr.Path {
t.Fatalf("expected ErrNoApplicableAccessControls for path %q, but got one for path %q", test.expErrPath, noAppErr.Path)
}
return
}
outPatterns := make([]string, len(out))
for i := range out {
outPatterns[i] = out[i].Pattern
}
clean := func(s []string) []string {
if len(s) == 0 {
return nil
}
sort.Strings(s)
return s
}
outPatterns = clean(outPatterns)
test.exp = clean(test.exp)
if !reflect.DeepEqual(outPatterns, test.exp) {
t.Fatalf("expected: %+v\ngot: %+v", test.exp, outPatterns)
err := AssertCanCommit(test.acl, test.req)
if test.allowed && err != nil {
t.Fatalf("expected to be allowed but got: %v", err)
} else if !test.allowed && !errors.As(err, new(ErrCommitRequestDenied)) {
t.Fatalf("expected to be denied but got: %v", err)
}
})
}

@ -1,130 +0,0 @@
package accessctl
import (
"dehub/sigcred"
"dehub/typeobj"
"errors"
"fmt"
"math"
"strconv"
"strings"
)
// ConditionInterface describes the methods that all Conditions must implement.
type ConditionInterface interface {
	// Satisfied asserts that the Condition is satisfied by the given set of
	// Credentials. If it is not (or something else went wrong) then an error is
	// returned.
	//
	// NOTE that Satisfied assumes the Credential has already been Verify'd.
	Satisfied([]sigcred.Credential) error
}

// Condition represents an access control condition being defined in the Config.
// Only one of its fields may be filled in at a time.
type Condition struct {
	Signature *ConditionSignature `type:"signature"`
}

// MarshalYAML implements the yaml.Marshaler interface.
func (c Condition) MarshalYAML() (interface{}, error) {
	return typeobj.MarshalYAML(c)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *Condition) UnmarshalYAML(unmarshal func(interface{}) error) error {
	return typeobj.UnmarshalYAML(c, unmarshal)
}

// Interface returns the ConditionInterface encapsulated by this Condition
// object.
func (c Condition) Interface() (ConditionInterface, error) {
	el, _, err := typeobj.Element(c)
	if err != nil {
		return nil, err
	}
	// The populated field is always a ConditionInterface implementation by
	// construction of the Condition struct.
	return el.(ConditionInterface), nil
}

// ConditionSignature represents the configuration of an access control
// condition which requires one or more signatures to be present on a commit.
//
// Either AccountIDs or AnyAccount must be filled; Count applies to whichever
// is used.
type ConditionSignature struct {
	AccountIDs []string `yaml:"account_ids,omitempty"`
	AnyAccount bool     `yaml:"any_account,omitempty"`
	Count      string   `yaml:"count"`
}

var _ ConditionInterface = ConditionSignature{}
// targetNum computes the number of account signatures required for this
// Condition to be satisfied. Count may be an absolute integer (e.g. "2") or a
// percent of len(AccountIDs), rounded up (e.g. "50%").
func (condSig ConditionSignature) targetNum() (int, error) {
	if !strings.HasSuffix(condSig.Count, "%") {
		return strconv.Atoi(condSig.Count)
	} else if condSig.AnyAccount {
		// A percent target is relative to AccountIDs, which AnyAccount
		// ignores.
		return 0, errors.New("cannot use AnyAccount and a percent Count together")
	}

	// Use TrimSuffix rather than TrimRight: TrimRight("50%%", "%") would strip
	// both '%' runes and silently accept the malformed Count, while TrimSuffix
	// leaves "50%" behind so ParseFloat rejects it.
	percentStr := strings.TrimSuffix(condSig.Count, "%")
	percent, err := strconv.ParseFloat(percentStr, 64)
	if err != nil {
		return 0, fmt.Errorf("could not parse Count as percent %q: %w", condSig.Count, err)
	}

	targetF := float64(len(condSig.AccountIDs)) * percent / 100
	return int(math.Ceil(targetF)), nil
}
// ErrConditionSignatureUnsatisfied is returned from ConditionSignature's
// Satisfied method when the Condition has not been satisfied.
type ErrConditionSignatureUnsatisfied struct {
	// TargetNumAccounts is the number of account signatures that were
	// required; NumAccounts is how many matching ones were actually present.
	TargetNumAccounts, NumAccounts int
}

func (err ErrConditionSignatureUnsatisfied) Error() string {
	return fmt.Sprintf("not enough valid signature credentials, requires %d but only had %d",
		err.TargetNumAccounts, err.NumAccounts)
}
// Satisfied asserts that the given Credentials contains enough signatures to be
// satisfied.
func (condSig ConditionSignature) Satisfied(creds []sigcred.Credential) error {
	targetN, err := condSig.targetNum()
	if err != nil {
		return fmt.Errorf("could not compute ConditionSignature target number of accounts: %w", err)
	}

	// Collect the set of accounts which provided a credential.
	seen := map[string]struct{}{}
	for _, cred := range creds {
		// TODO currently only signature credentials are implemented, so we can
		// just assume that the given AccountID has provided a sig. In the
		// future this may not be true.
		seen[cred.AccountID] = struct{}{}
	}

	var satisfiedN int
	if condSig.AnyAccount {
		// TODO this doesn't actually check that the accounts are defined in the
		// Config.
		satisfiedN = len(seen)
	} else {
		// Deduplicate AccountIDs before counting, so a repeated ID in the
		// config can't be counted twice.
		wanted := map[string]struct{}{}
		for _, accountID := range condSig.AccountIDs {
			wanted[accountID] = struct{}{}
		}
		for accountID := range wanted {
			if _, ok := seen[accountID]; ok {
				satisfiedN++
			}
		}
	}

	if satisfiedN < targetN {
		return ErrConditionSignatureUnsatisfied{
			TargetNumAccounts: targetN,
			NumAccounts:       satisfiedN,
		}
	}
	return nil
}

@ -1,110 +0,0 @@
package accessctl
import (
"dehub/sigcred"
"reflect"
"testing"
)
// TestConditionSignatureSatisfied checks ConditionSignature.Satisfied against
// credentials from various sets of accounts, covering absolute and percent
// Counts as well as the AnyAccount mode.
func TestConditionSignatureSatisfied(t *testing.T) {
	tests := []struct {
		descr          string
		cond           ConditionSignature
		credAccountIDs []string
		// err, if set, is the exact error Satisfied is expected to return;
		// nil means the condition should be satisfied.
		err error
	}{
		{
			descr: "no cred accounts",
			cond: ConditionSignature{
				AnyAccount: true,
				Count:      "1",
			},
			err: ErrConditionSignatureUnsatisfied{
				TargetNumAccounts: 1,
				NumAccounts:       0,
			},
		},
		{
			descr: "one cred account",
			cond: ConditionSignature{
				AnyAccount: true,
				Count:      "1",
			},
			credAccountIDs: []string{"foo"},
		},
		{
			descr: "one matching cred account",
			cond: ConditionSignature{
				AccountIDs: []string{"foo", "bar"},
				Count:      "1",
			},
			credAccountIDs: []string{"foo"},
		},
		{
			descr: "no matching cred account",
			cond: ConditionSignature{
				AccountIDs: []string{"foo", "bar"},
				Count:      "1",
			},
			credAccountIDs: []string{"baz"},
			err: ErrConditionSignatureUnsatisfied{
				TargetNumAccounts: 1,
				NumAccounts:       0,
			},
		},
		{
			descr: "two matching cred accounts",
			cond: ConditionSignature{
				AccountIDs: []string{"foo", "bar"},
				Count:      "2",
			},
			credAccountIDs: []string{"foo", "bar"},
		},
		{
			descr: "one matching cred account, missing one",
			cond: ConditionSignature{
				AccountIDs: []string{"foo", "bar"},
				Count:      "2",
			},
			credAccountIDs: []string{"foo", "baz"},
			err: ErrConditionSignatureUnsatisfied{
				TargetNumAccounts: 2,
				NumAccounts:       1,
			},
		},
		{
			descr: "50 percent matching cred accounts",
			cond: ConditionSignature{
				AccountIDs: []string{"foo", "bar", "baz"},
				Count:      "50%",
			},
			credAccountIDs: []string{"foo", "bar"},
		},
		{
			descr: "not 50 percent matching cred accounts",
			cond: ConditionSignature{
				AccountIDs: []string{"foo", "bar", "baz"},
				Count:      "50%",
			},
			credAccountIDs: []string{"foo"},
			err: ErrConditionSignatureUnsatisfied{
				TargetNumAccounts: 2,
				NumAccounts:       1,
			},
		},
	}

	for _, test := range tests {
		t.Run(test.descr, func(t *testing.T) {
			// Build one credential per account ID.
			creds := make([]sigcred.Credential, len(test.credAccountIDs))
			for i := range test.credAccountIDs {
				creds[i].AccountID = test.credAccountIDs[i]
			}
			err := test.cond.Satisfied(creds)
			if !reflect.DeepEqual(err, test.err) {
				t.Fatalf("Satisfied returned %#v\nexpected %#v", err, test.err)
			}
		})
	}
}

@ -0,0 +1,124 @@
package accessctl
import (
"errors"
"fmt"
"dehub.dev/src/dehub.git/typeobj"
)
// ErrFilterNoMatch is returned from a FilterInterface's Match method when the
// given request was not matched to the filter due to the request itself (as
// opposed to some error in the filter's definition).
type ErrFilterNoMatch struct {
Err error
}
func (err ErrFilterNoMatch) Error() string {
return fmt.Sprintf("matching with filter: %s", err.Err.Error())
}
// Filter describes the methods that all Filters must implement.
type Filter interface {
	// MatchCommit returns nil if the CommitRequest is matched by the filter,
	// otherwise it returns an error (ErrFilterNoMatch if the error is due to
	// the CommitRequest).
	MatchCommit(CommitRequest) error
}

// FilterUnion represents an access control filter being defined in the Config.
// Only one of its fields may be filled at a time.
type FilterUnion struct {
	Signature        *FilterSignature        `type:"signature"`
	Branch           *FilterBranch           `type:"branch"`
	FilesChanged     *FilterFilesChanged     `type:"files_changed"`
	PayloadType      *FilterPayloadType      `type:"payload_type"`
	CommitAttributes *FilterCommitAttributes `type:"commit_attributes"`
	Not              *FilterNot              `type:"not"`
}
// MarshalYAML implements the yaml.Marshaler interface.
func (f FilterUnion) MarshalYAML() (interface{}, error) {
	return typeobj.MarshalYAML(f)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (f *FilterUnion) UnmarshalYAML(unmarshal func(interface{}) error) error {
	return typeobj.UnmarshalYAML(f, unmarshal)
}

// Filter returns the Filter encapsulated by this FilterUnion.
//
// This method will panic if a Filter field is not populated.
func (f FilterUnion) Filter() Filter {
	el, _, err := typeobj.Element(f)
	if err != nil {
		panic(err)
	}
	return el.(Filter)
}

// Type returns the Filter's type (as would be used in its YAML "type" field).
//
// This will panic if a Filter field is not populated.
func (f FilterUnion) Type() string {
	_, typeStr, err := typeobj.Element(f)
	if err != nil {
		panic(err)
	}
	return typeStr
}
// FilterPayloadType filters by what type of payload is being requested. Exactly
// one of its fields should be filled.
type FilterPayloadType struct {
	Type  string   `yaml:"payload_type"`
	Types []string `yaml:"payload_types"`
}

var _ Filter = FilterPayloadType{}

// MatchCommit implements the method for FilterInterface. It matches when the
// request's payload type equals Type, or appears in Types.
func (f FilterPayloadType) MatchCommit(req CommitRequest) error {
	if f.Type != "" {
		if req.Type == f.Type {
			return nil
		}
		return ErrFilterNoMatch{
			Err: fmt.Errorf("payload type %q does not match filter's type %q",
				req.Type, f.Type),
		}
	}

	if len(f.Types) == 0 {
		return errors.New(`one of the following fields must be set: "payload_type", "payload_types"`)
	}

	for _, typ := range f.Types {
		if typ == req.Type {
			return nil
		}
	}
	return ErrFilterNoMatch{
		Err: fmt.Errorf("payload type %q does not match any of filter's types %+v",
			req.Type, f.Types),
	}
}
// FilterCommitAttributes filters by one or more attributes a commit can have.
// If more than one field is filled in then all relevant attributes must be
// present on the commit for this filter to match.
type FilterCommitAttributes struct {
	NonFastForward bool `yaml:"non_fast_forward"`
}

var _ Filter = FilterCommitAttributes{}

// MatchCommit implements the method for FilterInterface.
func (f FilterCommitAttributes) MatchCommit(req CommitRequest) error {
	// Only a non-fast-forward requirement can fail to match; an empty filter
	// matches everything.
	if !f.NonFastForward || req.NonFastForward {
		return nil
	}
	return ErrFilterNoMatch{Err: errors.New("commit is a fast-forward")}
}

@ -0,0 +1,26 @@
package accessctl
import (
"errors"
)
// FilterNot wraps another Filter. If that filter matches, FilterNot does not
// match, and vice-versa.
type FilterNot struct {
	Filter FilterUnion `yaml:"filter"`
}

var _ Filter = FilterNot{}

// MatchCommit implements the method for FilterInterface.
func (f FilterNot) MatchCommit(req CommitRequest) error {
	err := f.Filter.Filter().MatchCommit(req)
	switch {
	case errors.As(err, new(ErrFilterNoMatch)):
		// The sub-filter rejected the request, so the negation matches.
		return nil
	case err != nil:
		// A real error in the sub-filter's definition is passed through.
		return err
	default:
		return ErrFilterNoMatch{Err: errors.New("sub-filter did match")}
	}
}

// TODO FilterAll
// TODO FilterAny

@ -0,0 +1,32 @@
package accessctl
import "testing"
// TestFilterNot checks that FilterNot inverts the result of its sub-filter.
func TestFilterNot(t *testing.T) {
	runCommitMatchTests(t, []filterCommitMatchTest{
		{
			descr: "sub-filter does match",
			filter: FilterNot{
				Filter: FilterUnion{
					PayloadType: &FilterPayloadType{Type: "foo"},
				},
			},
			req: CommitRequest{
				Type: "foo",
			},
			match: false,
		},
		{
			descr: "sub-filter does not match",
			filter: FilterNot{
				Filter: FilterUnion{
					PayloadType: &FilterPayloadType{Type: "foo"},
				},
			},
			req: CommitRequest{
				Type: "bar",
			},
			match: true,
		},
	})
}

@ -0,0 +1,96 @@
package accessctl
import (
"errors"
"fmt"
"github.com/bmatcuk/doublestar"
)
// StringMatcher is used to match against a string. It can use one of several
// methods to match. Only one field should be filled at a time.
type StringMatcher struct {
	// Pattern, if set, indicates that the Match method should succeed if this
	// doublestar pattern matches against the string.
	Pattern string `yaml:"pattern,omitempty"`

	// Patterns, if set, indicates that the Match method should succeed if at
	// least one of these doublestar patterns matches against the string.
	Patterns []string `yaml:"patterns,omitempty"`
}

// doublestarMatch wraps doublestar.Match, annotating any pattern error with
// the pattern and string being matched.
func doublestarMatch(pattern, str string) (bool, error) {
	ok, err := doublestar.Match(pattern, str)
	if err != nil {
		return false, fmt.Errorf("matching %q on pattern %q: %w",
			str, pattern, err)
	}
	return ok, nil
}
// Match operates similarly to the Match method of the FilterInterface, except
// it only takes in strings.
func (m StringMatcher) Match(str string) error {
	if m.Pattern != "" {
		ok, err := doublestarMatch(m.Pattern, str)
		if err != nil {
			return err
		} else if !ok {
			return ErrFilterNoMatch{
				Err: fmt.Errorf("pattern %q does not match %q", m.Pattern, str),
			}
		}
		return nil
	}

	if len(m.Patterns) == 0 {
		return errors.New(`one of the following fields must be set: "pattern", "patterns"`)
	}

	// Succeed on the first pattern which matches.
	for _, pattern := range m.Patterns {
		ok, err := doublestarMatch(pattern, str)
		if err != nil {
			return err
		} else if ok {
			return nil
		}
	}
	return ErrFilterNoMatch{
		Err: fmt.Errorf("no patterns in %+v match %q", m.Patterns, str),
	}
}
// FilterBranch matches a CommitRequest's Branch field using a double-star
// pattern.
type FilterBranch struct {
	StringMatcher StringMatcher `yaml:",inline"`
}

var _ Filter = FilterBranch{}

// MatchCommit implements the method for FilterInterface.
func (f FilterBranch) MatchCommit(req CommitRequest) error {
	return f.StringMatcher.Match(req.Branch)
}
// FilterFilesChanged matches a CommitRequest's FilesChanged field using a
// double-star pattern. It matches if at least one of the CommitRequest's
// FilesChanged matches (NOTE: not if all of them match — see the tests'
// "some paths against one pattern" case).
type FilterFilesChanged struct {
	StringMatcher StringMatcher `yaml:",inline"`
}

var _ Filter = FilterFilesChanged{}

// MatchCommit implements the method for FilterInterface.
func (f FilterFilesChanged) MatchCommit(req CommitRequest) error {
	for _, path := range req.FilesChanged {
		// A non-match on one path just means move on to the next; any other
		// error is a problem with the matcher definition itself.
		if err := f.StringMatcher.Match(path); errors.As(err, new(ErrFilterNoMatch)) {
			continue
		} else if err != nil {
			return err
		}
		// At least one path matched.
		return nil
	}
	return ErrFilterNoMatch{Err: errors.New("no paths matched")}
}

@ -0,0 +1,199 @@
package accessctl
import (
"errors"
"testing"
)
// TestStringMatcher exercises StringMatcher.Match for both the single-Pattern
// and multi-Patterns modes, including single- and double-star globbing.
func TestStringMatcher(t *testing.T) {
	tests := []struct {
		descr   string
		matcher StringMatcher
		str     string
		match   bool
	}{
		// Pattern
		{
			descr: "pattern exact match",
			matcher: StringMatcher{
				Pattern: "foo",
			},
			str:   "foo",
			match: true,
		},
		{
			descr: "pattern exact no match",
			matcher: StringMatcher{
				Pattern: "foo",
			},
			str:   "bar",
			match: false,
		},
		{
			descr: "pattern single star match",
			matcher: StringMatcher{
				Pattern: "foo/*",
			},
			str:   "foo/bar",
			match: true,
		},
		{
			descr: "pattern single star no match 1",
			matcher: StringMatcher{
				Pattern: "foo/*",
			},
			str:   "foo",
			match: false,
		},
		{
			descr: "pattern single star no match 2",
			matcher: StringMatcher{
				Pattern: "foo/*",
			},
			str:   "foo/bar/baz",
			match: false,
		},
		{
			descr: "pattern double star match 1",
			matcher: StringMatcher{
				Pattern: "foo/**",
			},
			str:   "foo/bar",
			match: true,
		},
		{
			descr: "pattern double star match 2",
			matcher: StringMatcher{
				Pattern: "foo/**",
			},
			str:   "foo/bar/baz",
			match: true,
		},
		{
			descr: "pattern double star no match",
			matcher: StringMatcher{
				Pattern: "foo/**",
			},
			str:   "foo",
			match: false,
		},

		// Patterns, assumes individual pattern matching works correctly
		{
			descr: "patterns single match",
			matcher: StringMatcher{
				Patterns: []string{"foo"},
			},
			str:   "foo",
			match: true,
		},
		{
			descr: "patterns single no match",
			matcher: StringMatcher{
				Patterns: []string{"foo"},
			},
			str:   "bar",
			match: false,
		},
		{
			descr: "patterns multi first match",
			matcher: StringMatcher{
				Patterns: []string{"foo", "bar"},
			},
			str:   "foo",
			match: true,
		},
		{
			descr: "patterns multi second match",
			matcher: StringMatcher{
				Patterns: []string{"foo", "bar"},
			},
			str:   "bar",
			match: true,
		},
		{
			descr: "patterns multi no match",
			matcher: StringMatcher{
				Patterns: []string{"foo", "bar"},
			},
			str:   "baz",
			match: false,
		},
	}

	for _, test := range tests {
		t.Run(test.descr, func(t *testing.T) {
			err := test.matcher.Match(test.str)
			if test.match && err != nil {
				t.Fatalf("expected to match, got %v", err)
			} else if !test.match && !errors.As(err, new(ErrFilterNoMatch)) {
				t.Fatalf("expected ErrFilterNoMatch, got %#v", err)
			}
		})
	}
}
// TestFilterFilesChanged checks FilterFilesChanged's any-path-matches
// semantics: the filter matches if at least one changed path matches.
func TestFilterFilesChanged(t *testing.T) {
	// mkReq builds a CommitRequest with the given changed file paths.
	mkReq := func(paths ...string) CommitRequest {
		return CommitRequest{FilesChanged: paths}
	}

	runCommitMatchTests(t, []filterCommitMatchTest{
		{
			descr: "no paths",
			filter: FilterFilesChanged{
				StringMatcher: StringMatcher{Pattern: "foo"},
			},
			req:   mkReq(),
			match: false,
		},
		{
			descr: "all paths against one pattern",
			filter: FilterFilesChanged{
				StringMatcher: StringMatcher{Pattern: "foo/*"},
			},
			req:   mkReq("foo/bar", "foo/baz"),
			match: true,
		},
		{
			descr: "all paths against multiple patterns",
			filter: FilterFilesChanged{
				StringMatcher: StringMatcher{Patterns: []string{"foo", "bar"}},
			},
			req:   mkReq("foo", "bar"),
			match: true,
		},
		{
			descr: "some paths against one pattern",
			filter: FilterFilesChanged{
				StringMatcher: StringMatcher{Pattern: "foo"},
			},
			req:   mkReq("foo", "bar"),
			match: true,
		},
		{
			descr: "some paths against many patterns",
			filter: FilterFilesChanged{
				StringMatcher: StringMatcher{Patterns: []string{"foo", "bar"}},
			},
			req:   mkReq("foo", "baz"),
			match: true,
		},
		{
			descr: "no paths against one pattern",
			filter: FilterFilesChanged{
				StringMatcher: StringMatcher{Pattern: "foo"},
			},
			req:   mkReq("baz", "buz"),
			match: false,
		},
		{
			descr: "no paths against many patterns",
			filter: FilterFilesChanged{
				StringMatcher: StringMatcher{Patterns: []string{"foo", "bar"}},
			},
			req:   mkReq("baz", "buz"),
			match: false,
		},
	})
}

@ -0,0 +1,113 @@
package accessctl
import (
"errors"
"fmt"
"math"
"strconv"
"strings"
)
// FilterSignature represents the configuration of a Filter which requires one
// or more signature credentials to be present on a commit.
//
// Either AccountIDs, AnyAccount, or Any must be filled in; all are mutually
// exclusive.
type FilterSignature struct {
	// AccountIDs requires signatures from these specific accounts.
	AccountIDs []string `yaml:"account_ids,omitempty"`

	// Any is satisfied by any signature credential at all, even one without
	// an account ID.
	Any bool `yaml:"any,omitempty"`

	// AnyAccount is satisfied by signatures from any accounts.
	AnyAccount bool `yaml:"any_account,omitempty"`

	// Count is the number of signatures required: an absolute number ("2")
	// or a percent of AccountIDs ("50%"). Defaults to 1 when empty.
	Count string `yaml:"count,omitempty"`
}

var _ Filter = FilterSignature{}
// targetNum computes the number of account signatures required by this filter.
// Count may be empty (defaults to 1), an absolute integer (e.g. "2"), or a
// percent of len(AccountIDs), rounded up (e.g. "50%").
func (f FilterSignature) targetNum() (int, error) {
	if f.Count == "" {
		return 1, nil
	} else if !strings.HasSuffix(f.Count, "%") {
		return strconv.Atoi(f.Count)
	} else if f.AnyAccount {
		// A percent target is relative to AccountIDs, which AnyAccount
		// ignores.
		return 0, errors.New("cannot use AnyAccount and a percent Count together")
	}

	// Use TrimSuffix rather than TrimRight: TrimRight("50%%", "%") would strip
	// both '%' runes and silently accept the malformed Count, while TrimSuffix
	// leaves "50%" behind so ParseFloat rejects it.
	percentStr := strings.TrimSuffix(f.Count, "%")
	percent, err := strconv.ParseFloat(percentStr, 64)
	if err != nil {
		return 0, fmt.Errorf("could not parse Count as percent %q: %w", f.Count, err)
	}

	target := float64(len(f.AccountIDs)) * percent / 100
	return int(math.Ceil(target)), nil
}
// ErrFilterSignatureUnsatisfied is returned from FilterSignature's
// Match method when the filter has not been satisfied.
type ErrFilterSignatureUnsatisfied struct {
	// TargetNumAccounts is the number of account signatures required;
	// NumAccounts is how many matching ones were actually present.
	TargetNumAccounts, NumAccounts int
}

func (err ErrFilterSignatureUnsatisfied) Error() string {
	return fmt.Sprintf("not enough valid signature credentials, filter requires %d but only had %d",
		err.TargetNumAccounts, err.NumAccounts)
}
// MatchCommit returns true if the CommitRequest contains a sufficient number of
// signature Credentials.
func (f FilterSignature) MatchCommit(req CommitRequest) error {
	targetN, err := f.targetNum()
	if err != nil {
		return fmt.Errorf("computing target number of accounts: %w", err)
	}

	// Tally the signature credentials, remembering which accounts they belong
	// to (anonymous signatures carry no account ID).
	sigCount := 0
	seenAccounts := map[string]struct{}{}
	for _, cred := range req.Credentials {
		// TODO support other kinds of signatures
		if cred.PGPSignature == nil {
			continue
		}
		sigCount++
		if cred.AccountID != "" {
			seenAccounts[cred.AccountID] = struct{}{}
		}
	}

	if sigCount == 0 {
		return ErrFilterNoMatch{
			Err: ErrFilterSignatureUnsatisfied{TargetNumAccounts: targetN},
		}
	}

	if f.Any {
		return nil
	}

	var matched int
	if f.AnyAccount {
		// TODO this doesn't actually check that the accounts are defined in the
		// Config. It works for now as long as the Credentials are valid, since
		// only an Account defined in the Config could create a valid
		// Credential, but once that's not the case this will need to be
		// revisited.
		matched = len(seenAccounts)
	} else {
		// Deduplicate AccountIDs before counting, so a repeated ID in the
		// config can't be counted twice.
		wanted := map[string]struct{}{}
		for _, accountID := range f.AccountIDs {
			wanted[accountID] = struct{}{}
		}
		for accountID := range wanted {
			if _, ok := seenAccounts[accountID]; ok {
				matched++
			}
		}
	}

	if matched < targetN {
		return ErrFilterNoMatch{
			Err: ErrFilterSignatureUnsatisfied{
				NumAccounts:       matched,
				TargetNumAccounts: targetN,
			},
		}
	}
	return nil
}

@ -0,0 +1,124 @@
package accessctl
import (
"testing"
"dehub.dev/src/dehub.git/sigcred"
)
// TestFilterSignature checks FilterSignature matching against various
// combinations of account IDs, absolute and percent Counts, and the Any /
// AnyAccount modes.
func TestFilterSignature(t *testing.T) {
	// mkReq builds a CommitRequest carrying one PGP signature credential per
	// given account ID.
	mkReq := func(accountIDs ...string) CommitRequest {
		creds := make([]sigcred.CredentialUnion, len(accountIDs))
		for i := range accountIDs {
			creds[i].PGPSignature = new(sigcred.CredentialPGPSignature)
			creds[i].AccountID = accountIDs[i]
		}
		return CommitRequest{Credentials: creds}
	}

	runCommitMatchTests(t, []filterCommitMatchTest{
		{
			descr: "no cred accounts",
			filter: FilterSignature{
				AnyAccount: true,
				Count:      "1",
			},
			matchErr: ErrFilterSignatureUnsatisfied{
				TargetNumAccounts: 1,
				NumAccounts:       0,
			},
		},
		{
			descr: "one cred account",
			filter: FilterSignature{
				AnyAccount: true,
				Count:      "1",
			},
			req:   mkReq("foo"),
			match: true,
		},
		{
			descr: "one matching cred account",
			filter: FilterSignature{
				AccountIDs: []string{"foo", "bar"},
				Count:      "1",
			},
			req:   mkReq("foo"),
			match: true,
		},
		{
			descr: "no matching cred account",
			filter: FilterSignature{
				AccountIDs: []string{"foo", "bar"},
				Count:      "1",
			},
			req: mkReq("baz"),
			matchErr: ErrFilterSignatureUnsatisfied{
				TargetNumAccounts: 1,
				NumAccounts:       0,
			},
		},
		{
			descr: "two matching cred accounts",
			filter: FilterSignature{
				AccountIDs: []string{"foo", "bar"},
				Count:      "2",
			},
			req:   mkReq("foo", "bar"),
			match: true,
		},
		{
			descr: "one matching cred account, missing one",
			filter: FilterSignature{
				AccountIDs: []string{"foo", "bar"},
				Count:      "2",
			},
			req: mkReq("foo", "baz"),
			matchErr: ErrFilterSignatureUnsatisfied{
				TargetNumAccounts: 2,
				NumAccounts:       1,
			},
		},
		{
			descr: "50 percent matching cred accounts",
			filter: FilterSignature{
				AccountIDs: []string{"foo", "bar", "baz"},
				Count:      "50%",
			},
			req:   mkReq("foo", "bar"),
			match: true,
		},
		{
			descr: "not 50 percent matching cred accounts",
			filter: FilterSignature{
				AccountIDs: []string{"foo", "bar", "baz"},
				Count:      "50%",
			},
			req: mkReq("foo"),
			matchErr: ErrFilterSignatureUnsatisfied{
				TargetNumAccounts: 2,
				NumAccounts:       1,
			},
		},
		{
			descr: "any sig at all",
			filter: FilterSignature{
				Any: true,
			},
			req: CommitRequest{
				Credentials: []sigcred.CredentialUnion{
					{PGPSignature: new(sigcred.CredentialPGPSignature)},
				},
			},
			match: true,
		},
		{
			descr:  "not any sig at all",
			filter: FilterSignature{Any: true},
			req:    CommitRequest{},
			matchErr: ErrFilterSignatureUnsatisfied{
				TargetNumAccounts: 1,
			},
		},
	})
}

@ -0,0 +1,137 @@
package accessctl
import (
"errors"
"reflect"
"testing"
)
// filterCommitMatchTest is a single MatchCommit test case shared by the
// per-filter test functions.
type filterCommitMatchTest struct {
	descr  string
	filter Filter
	req    CommitRequest
	match  bool

	// assumes match == false, and will ensure that the returned wrapped error
	// is this one.
	matchErr error
}

// runCommitMatchTests runs MatchCommit for each case, asserting that matching
// cases return nil and non-matching cases return an ErrFilterNoMatch
// (wrapping matchErr exactly, when one is given).
func runCommitMatchTests(t *testing.T, tests []filterCommitMatchTest) {
	for _, test := range tests {
		t.Run(test.descr, func(t *testing.T) {
			err := test.filter.MatchCommit(test.req)
			shouldMatch := test.match && test.matchErr == nil
			if shouldMatch && err != nil {
				t.Fatalf("expected to match, got %v", err)
			} else if shouldMatch {
				return
			} else if fErr := new(ErrFilterNoMatch); !errors.As(err, fErr) {
				t.Fatalf("expected ErrFilterNoMatch, got: %#v", err)
			} else if test.matchErr != nil && !reflect.DeepEqual(fErr.Err, test.matchErr) {
				t.Fatalf("expected err %#v, not %#v", test.matchErr, fErr.Err)
			}
		})
	}
}
// TestFilterPayloadType checks FilterPayloadType matching in both the single
// Type and multi Types modes.
func TestFilterPayloadType(t *testing.T) {
	// mkReq builds a CommitRequest with the given payload type.
	mkReq := func(commitType string) CommitRequest {
		return CommitRequest{Type: commitType}
	}

	runCommitMatchTests(t, []filterCommitMatchTest{
		{
			descr: "single match",
			filter: FilterPayloadType{
				Type: "foo",
			},
			req:   mkReq("foo"),
			match: true,
		},
		{
			descr: "single no match",
			filter: FilterPayloadType{
				Type: "foo",
			},
			req:   mkReq("bar"),
			match: false,
		},
		{
			descr: "multi match first",
			filter: FilterPayloadType{
				Types: []string{"foo", "bar"},
			},
			req:   mkReq("foo"),
			match: true,
		},
		{
			descr: "multi match second",
			filter: FilterPayloadType{
				Types: []string{"foo", "bar"},
			},
			req:   mkReq("bar"),
			match: true,
		},
		{
			descr: "multi no match",
			filter: FilterPayloadType{
				Types: []string{"foo", "bar"},
			},
			req:   mkReq("baz"),
			match: false,
		},
	})
}
// TestFilterCommitAttributes checks FilterCommitAttributes' non-fast-forward
// matching, including when inverted via FilterNot.
func TestFilterCommitAttributes(t *testing.T) {
	// mkReq builds a CommitRequest with the given non-fast-forward flag.
	mkReq := func(nonFF bool) CommitRequest {
		return CommitRequest{NonFastForward: nonFF}
	}

	runCommitMatchTests(t, []filterCommitMatchTest{
		{
			descr:  "ff with empty filter",
			filter: FilterCommitAttributes{},
			req:    mkReq(false),
			match:  true,
		},
		{
			descr:  "non-ff with empty filter",
			filter: FilterCommitAttributes{},
			req:    mkReq(true),
			match:  true,
		},
		{
			descr:  "ff with non-ff filter",
			filter: FilterCommitAttributes{NonFastForward: true},
			req:    mkReq(false),
			match:  false,
		},
		{
			descr:  "non-ff with non-ff filter",
			filter: FilterCommitAttributes{NonFastForward: true},
			req:    mkReq(true),
			match:  true,
		},
		{
			descr: "ff with inverted non-ff filter",
			filter: FilterNot{Filter: FilterUnion{
				CommitAttributes: &FilterCommitAttributes{NonFastForward: true},
			}},
			req:   mkReq(false),
			match: true,
		},
		{
			descr: "non-ff with inverted non-ff filter",
			filter: FilterNot{Filter: FilterUnion{
				CommitAttributes: &FilterCommitAttributes{NonFastForward: true},
			}},
			req:   mkReq(true),
			match: false,
		},
	})
}

@ -1,75 +0,0 @@
package dehub
import (
"crypto/sha256"
"encoding/binary"
"fmt"
"hash"
"sort"
"gopkg.in/src-d/go-git.v4/plumbing/object"
)
var (
	// defaultHashHelperAlgo is the hash constructor used when the caller
	// doesn't supply one.
	defaultHashHelperAlgo = sha256.New
)

// hashHelper wraps a hash.Hash with helpers for writing length-prefixed
// values, so that the resulting digest is unambiguous.
type hashHelper struct {
	hash.Hash

	// varintBuf is a scratch buffer reused by writeUint for varint encoding.
	varintBuf []byte
}
// newHashHelper returns a hashHelper wrapping h. If h is nil then
// defaultHashHelperAlgo will be used to construct one.
func newHashHelper(h hash.Hash) *hashHelper {
	if h == nil {
		h = defaultHashHelperAlgo()
	}
	return &hashHelper{
		Hash:      h,
		varintBuf: make([]byte, binary.MaxVarintLen64),
	}
}
// writeUint writes i to the hash as a uvarint. hash.Hash writes are not
// expected to fail, so any error panics.
func (s *hashHelper) writeUint(i uint64) {
	n := binary.PutUvarint(s.varintBuf, i)
	if _, err := s.Write(s.varintBuf[:n]); err != nil {
		panic(fmt.Sprintf("error writing %x to sha256 sum: %v", s.varintBuf[:n], err))
	}
}
// writeStr writes a length-prefixed string to the hash, so that e.g.
// ("ab","c") and ("a","bc") produce different digests. The Write error is
// checked for consistency with writeUint, even though hash.Hash writes are
// not expected to fail.
func (s *hashHelper) writeStr(str string) {
	s.writeUint(uint64(len(str)))
	if _, err := s.Write([]byte(str)); err != nil {
		panic(fmt.Sprintf("error writing %q to hash sum: %v", str, err))
	}
}
// writeTreeDiff writes the set of file changes between the two trees to the
// hash: the number of changes, then for each change its path and the
// before/after modes and hashes.
func (s *hashHelper) writeTreeDiff(from, to *object.Tree) {
	filesChanged, err := calcDiff(from, to)
	if err != nil {
		panic(err.Error())
	}

	// Sort by path so the digest doesn't depend on diff ordering.
	sort.Slice(filesChanged, func(i, j int) bool {
		return filesChanged[i].path < filesChanged[j].path
	})

	s.writeUint(uint64(len(filesChanged)))
	for _, fileChanged := range filesChanged {
		s.writeStr(fileChanged.path)
		s.Write(fileChanged.fromMode.Bytes())
		s.Write(fileChanged.fromHash[:])
		s.Write(fileChanged.toMode.Bytes())
		s.Write(fileChanged.toHash[:])
	}
}
// changeHashVersion is prefixed to every change hash digest so the format can
// be evolved later without ambiguity.
var changeHashVersion = []byte{0}

// genChangeHash computes the hash of a change: the commit message plus the
// tree diff from `from` to `to`. If h is nil then defaultHashHelperAlgo will
// be used.
func genChangeHash(h hash.Hash, msg string, from, to *object.Tree) []byte {
	s := newHashHelper(h)
	s.writeStr(msg)
	s.writeTreeDiff(from, to)
	return s.Sum(changeHashVersion)
}

@ -0,0 +1,39 @@
# dehub-remote
This directory provides a simple Docker image which can be spun up to run a
dehub-enabled git http remote server. Commits which are pushed to this server
will be automatically verified using `dehub verify`.
The docker image is also being hosted on docker hub at
[mediocregopher/dehub-remote][dehub-remote]. Proper image tagging/versioning
coming soon!
[dehub-remote]: https://hub.docker.com/repository/docker/mediocregopher/dehub-remote
## Usage
Running the following:
```
docker run \
--name dehub \
-v /opt/dehub/repos:/repos \
-p 8080:80 \
mediocregopher/dehub-remote repo-a.git repo-b.git
```
Will start an http server on port 8080, using `/opt/dehub/repos` to store all
repo folders. It will then initialize repo directories at
`/opt/dehub/repos/repo-a.git` and `/opt/dehub/repos/repo-b.git`, if they aren't
already there.
## Extras
For convenience the docker image also includes the
[git-http-server](../git-http-server/) binary.
## Contributors
The Dockerfile being used is based on
[gitbox](https://github.com/nmarus/docker-gitbox), so thank you to nmarus for
the great work there.

@ -0,0 +1,44 @@
# Run worker processes as the git user so repos served from /repos stay
# accessible to it.
user git git;
worker_processes 1;
pid /run/nginx.pid;

events {
worker_connections 1024;
}

http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 15;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
server_names_hash_bucket_size 64;

server {
listen 80;
# MYSERVER is replaced with the container's $FQDN by start.sh at startup.
server_name MYSERVER default;
root /var/www;
access_log /var/log/nginx/MYSERVER.access.log combined;
error_log /var/log/nginx/MYSERVER.error.log error;

#git SMART HTTP
# All requests are handed to git-http-backend via fcgiwrap.
location / {
# Allow arbitrarily large pushes.
client_max_body_size 0;
fastcgi_param SCRIPT_FILENAME /usr/lib/git-core/git-http-backend;
fastcgi_param GIT_HTTP_EXPORT_ALL "";
fastcgi_param GIT_PROJECT_ROOT /repos;
fastcgi_param PATH_INFO $uri;
include /etc/nginx/fastcgi_params;
fastcgi_pass unix:/var/run/fcgiwrap.socket;
}
}
}

@ -0,0 +1,86 @@
#!/bin/bash

# Entrypoint for the dehub-remote image: initializes a bare dehub repo for
# each argument given, then serves them over git smart HTTP via
# nginx+fcgiwrap.

set -e

QUIET=false
#SFLOG="/start.log"

#print timestamp
timestamp() {
date +"%Y-%m-%d %T"
}

#screen/file logger
sflog() {
#if $1 is not null
if [ ! -z ${1+x} ]; then
message=$1
else
#exit function
return 1;
fi
#if $QUIET is not true
# NOTE(review): `$($QUIET)` runs the value of $QUIET as a command inside a
# substitution and relies on its exit status; the conventional spelling is
# `! $QUIET`. Behaves the same for QUIET=true/false — confirm before changing.
if ! $($QUIET); then
echo "${message}"
fi
#if $SFLOG is not null
if [ ! -z ${SFLOG+x} ]; then
#if $2 is regular file or does not exist
if [ -f ${SFLOG} ] || [ ! -e ${SFLOG} ]; then
echo "$(timestamp) ${message}" >> ${SFLOG}
fi
fi
}

#start services function
startc() {
sflog "Services for container are being started..."
/etc/init.d/fcgiwrap start > /dev/null
/etc/init.d/nginx start > /dev/null
sflog "The container services have started..."
}

#stop services function
stopc() {
sflog "Services for container are being stopped..."
/etc/init.d/nginx stop > /dev/null
/etc/init.d/fcgiwrap stop > /dev/null
sflog "Services for container have successfully stopped. Exiting."
exit 0
}

#trap "docker stop <container>" and shuts services down cleanly
trap "(stopc)" TERM INT

#startup
#test for ENV varibale $FQDN
if [ ! -z ${FQDN+x} ]; then
sflog "FQDN is set to ${FQDN}"
else
export FQDN=dehub
sflog "FQDN is set to ${FQDN}"
fi

#modify config files with fqdn
sed -i "s,MYSERVER,${FQDN},g" /etc/nginx/nginx.conf &> /dev/null

# create the individual repo directories
# Each positional argument names a bare repo to initialize under /repos.
while [ ! -z "$1" ]; do
dir="/repos/$1";
if [ ! -d "$dir" ]; then
echo "Initializing repo $1"
mkdir "$dir"
dehub init -path "$dir" -bare -remote
chown -R git:git "$dir"
fi
shift
done

#start init.d services
startc

#pause script to keep container running...
sflog "Services for container successfully started."
sflog "Dumping logs"
tail -f /var/log/nginx/*.log

@ -0,0 +1,273 @@
package main
import (
"context"
"errors"
"fmt"
"dehub.dev/src/dehub.git"
"dehub.dev/src/dehub.git/cmd/dehub/dcmd"
"dehub.dev/src/dehub.git/sigcred"
"gopkg.in/src-d/go-git.v4/plumbing"
)
func cmdCommit(ctx context.Context, cmd *dcmd.Cmd) {
flag := cmd.FlagSet()
accountID := flag.String("as", "", "Account to accredit commit with")
pgpKeyID := flag.String("anon-pgp-key", "", "ID of pgp key to sign with instead of using an account")
var proj proj
proj.initFlags(flag)
accreditAndCommit := func(payUn dehub.PayloadUnion) error {
var sig sigcred.Signifier
if *accountID != "" {
cfg, err := proj.LoadConfig()
if err != nil {
return err
}
var account dehub.Account
var ok bool
for _, account = range cfg.Accounts {
if account.ID == *accountID {
ok = true
break
}
}
if !ok {
return fmt.Errorf("account ID %q not found in config", *accountID)
} else if l := len(account.Signifiers); l == 0 || l > 1 {
return fmt.Errorf("account %q has %d signifiers, only one is supported right now", *accountID, l)
}
sig = account.Signifiers[0].Signifier(*accountID)
} else {
var err error
if sig, err = sigcred.LoadSignifierPGP(*pgpKeyID, true); err != nil {
return fmt.Errorf("loading pgp key %q: %w", *pgpKeyID, err)
}
}
payUn, err := proj.AccreditPayload(payUn, sig)
if err != nil {
return fmt.Errorf("accrediting payload: %w", err)
}
commit, err := proj.Commit(payUn)
if err != nil {
return fmt.Errorf("committing to git: %w", err)
}
fmt.Printf("committed to HEAD as %s\n", commit.Hash)
return nil
}
var hasStaged bool
body := func() (context.Context, error) {
if *accountID == "" && *pgpKeyID == "" {
return nil, errors.New("-as or -anon-pgp-key is required")
}
if err := proj.openProj(); err != nil {
return nil, err
}
var err error
if hasStaged, err = proj.HasStagedChanges(); err != nil {
return nil, fmt.Errorf("determining if any changes have been staged: %w", err)
}
return ctx, nil
}
cmd.SubCmd("change", "Commit file changes",
func(ctx context.Context, cmd *dcmd.Cmd) {
flag := cmd.FlagSet()
description := flag.String("descr", "", "Description of changes")
amend := flag.Bool("amend", false, "Add changes to HEAD commit, amend its message, and re-accredit it")
cmd.Run(func() (context.Context, error) {
if !hasStaged && !*amend {
return nil, errors.New("no changes have been staged for commit")
}
var prevMsg string
if *amend {
oldHead, err := proj.softReset("change")
if err != nil {
return nil, err
}
prevMsg = oldHead.Payload.Change.Description
}
if *description == "" {
var err error
if *description, err = tmpFileMsg(defaultCommitFileMsgTpl, prevMsg); err != nil {
return nil, fmt.Errorf("error collecting commit message from user: %w", err)
} else if *description == "" {
return nil, errors.New("empty description, not doing anything")
}
}
payUn, err := proj.NewPayloadChange(*description)
if err != nil {
return nil, fmt.Errorf("could not construct change payload: %w", err)
} else if err := accreditAndCommit(payUn); err != nil {
return nil, err
}
return nil, nil
})
},
)
cmd.SubCmd("credential", "Commit credential of one or more change commits",
func(ctx context.Context, cmd *dcmd.Cmd) {
flag := cmd.FlagSet()
startRev := flag.String("start", "", "Revision of the starting commit to accredit (when accrediting a range of changes)")
endRev := flag.String("end", "HEAD", "Revision of the ending commit to accredit (when accrediting a range of changes)")
rev := flag.String("rev", "", "Revision of commit to accredit (when accrediting a single commit)")
description := flag.String("descr", "", "Description of changes being accredited")
cmd.Run(func() (context.Context, error) {
if *rev == "" && *startRev == "" {
return nil, errors.New("-rev or -start is required")
} else if hasStaged {
return nil, errors.New("credential commit cannot have staged changes")
}
var commits []dehub.Commit
if *rev != "" {
commit, err := proj.GetCommitByRevision(plumbing.Revision(*rev))
if err != nil {
return nil, fmt.Errorf("resolving revision %q: %w", *rev, err)
}
commits = []dehub.Commit{commit}
} else {
var err error
commits, err = proj.GetCommitRangeByRevision(
plumbing.Revision(*startRev),
plumbing.Revision(*endRev),
)
if err != nil {
return nil, fmt.Errorf("resolving revisions %q to %q: %w",
*startRev, *endRev, err)
}
}
var credPayUn dehub.PayloadUnion
if len(commits) == 0 {
return nil, errors.New("cannot create credential based on empty range of commits")
} else if len(commits) == 1 && commits[0].Payload.Credential != nil {
credPayUn = commits[0].Payload
} else {
if *description == "" {
lastDescr, err := dehub.LastChangeDescription(commits)
if err != nil {
return nil, fmt.Errorf("determining change description of commit(s): %w", err)
}
*description, err = tmpFileMsg(defaultCommitFileMsgTpl, lastDescr)
if err != nil {
return nil, fmt.Errorf("collecting credential description from user: %w", err)
} else if *description == "" {
return nil, errors.New("empty description, not doing anything")
}
}
var err error
credPayUn, err = proj.NewPayloadCredentialFromChanges(*description, commits)
if err != nil {
return nil, fmt.Errorf("constructing credential commit: %w", err)
}
}
if err := accreditAndCommit(credPayUn); err != nil {
return nil, err
}
return nil, nil
})
},
)
cmd.SubCmd("comment", "Commit a comment to a branch",
func(ctx context.Context, cmd *dcmd.Cmd) {
flag := cmd.FlagSet()
comment := flag.String("comment", "", "Comment message")
amend := flag.Bool("amend", false, "Amend the comment message currently in HEAD")
cmd.Run(func() (context.Context, error) {
if hasStaged {
return nil, errors.New("comment commit cannot have staged changes")
}
var prevComment string
if *amend {
oldHead, err := proj.softReset("comment")
if err != nil {
return nil, err
}
prevComment = oldHead.Payload.Comment.Comment
}
if *comment == "" {
var err error
if *comment, err = tmpFileMsg(defaultCommitFileMsgTpl, prevComment); err != nil {
return nil, fmt.Errorf("collecting comment message from user: %w", err)
} else if *comment == "" {
return nil, errors.New("empty comment message, not doing anything")
}
}
payUn, err := proj.NewPayloadComment(*comment)
if err != nil {
return nil, fmt.Errorf("constructing comment commit: %w", err)
}
return nil, accreditAndCommit(payUn)
})
},
)
cmd.Run(body)
}
// cmdCombine implements the "combine" sub-command, which takes a range of
// commits and produces a single new commit from them on a target branch.
func cmdCombine(ctx context.Context, cmd *dcmd.Cmd) {
	flag := cmd.FlagSet()
	onto := flag.String("onto", "", "Branch the new commit should be put onto")
	startRev := flag.String("start", "", "Revision of the starting commit to combine")
	endRev := flag.String("end", "", "Revision of the ending commit to combine")
	var proj proj
	proj.initFlags(flag)
	cmd.Run(func() (context.Context, error) {
		// all three of the flags are mandatory.
		anyMissing := *onto == "" || *startRev == "" || *endRev == ""
		if anyMissing {
			return nil, errors.New("-onto, -start, and -end are required")
		}
		if err := proj.openProj(); err != nil {
			return nil, err
		}
		// resolve the inclusive range of commits which are to be combined.
		commits, err := proj.GetCommitRangeByRevision(
			plumbing.Revision(*startRev), plumbing.Revision(*endRev),
		)
		if err != nil {
			return nil, fmt.Errorf("error getting commits %q to %q: %w",
				*startRev, *endRev, err)
		}
		ontoBranch := plumbing.NewBranchReferenceName(*onto)
		newCommit, combineErr := proj.CombinePayloadChanges(commits, ontoBranch)
		if combineErr != nil {
			return nil, combineErr
		}
		fmt.Printf("new commit %q added to branch %q\n", newCommit.Hash, ontoBranch.Short())
		return nil, nil
	})
}

@ -0,0 +1,66 @@
package main
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"os"
"strings"
"dehub.dev/src/dehub.git/cmd/dehub/dcmd"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// cmdHook implements the "hook" sub-command, which allows dehub to be used as
// a git hook. Currently only the server-side pre-receive hook is supported,
// via the "pre-receive" sub-sub-command.
func cmdHook(ctx context.Context, cmd *dcmd.Cmd) {
	flag := cmd.FlagSet()
	var proj proj
	proj.initFlags(flag)
	body := func() (context.Context, error) {
		if err := proj.openProj(); err != nil {
			return nil, err
		}
		return ctx, nil
	}
	cmd.SubCmd("pre-receive", "Use dehub as a server-side pre-receive hook",
		func(ctx context.Context, cmd *dcmd.Cmd) {
			cmd.Run(func() (context.Context, error) {
				// git feeds one line per pushed ref on stdin, of the form
				// "<old-hash> <new-hash> <ref-name>".
				br := bufio.NewReader(os.Stdin)
				for {
					line, err := br.ReadString('\n')
					if errors.Is(err, io.EOF) {
						break
					} else if err != nil {
						return nil, fmt.Errorf("error reading next line from stdin: %w", err)
					}
					fmt.Printf("Processing line %q\n", strings.TrimSpace(line))
					lineParts := strings.Fields(line)
					if len(lineParts) < 3 {
						return nil, fmt.Errorf("malformed pre-receive hook stdin line %q", line)
					}
					endHash := plumbing.NewHash(lineParts[1])
					branchName := plumbing.ReferenceName(lineParts[2])
					if !branchName.IsBranch() {
						return nil, fmt.Errorf("reference %q is not a branch, can't push to it", branchName)
					} else if endHash == plumbing.ZeroHash {
						return nil, errors.New("deleting remote branches is not currently supported")
					}
					// BUG FIX: this previously did
					// `return nil, proj.VerifyCanSetBranchHEADTo(...)`
					// unconditionally, so only the first line from stdin was
					// ever verified, remaining refs were silently accepted
					// unread, and the success message below was unreachable.
					// Verify each ref and keep looping until EOF.
					if err := proj.VerifyCanSetBranchHEADTo(branchName, endHash); err != nil {
						return nil, err
					}
				}
				fmt.Println("All pushed commits have been verified, well done.")
				return nil, nil
			})
		},
	)
	cmd.Run(body)
}

@ -0,0 +1,27 @@
package main
import (
"context"
"fmt"
"dehub.dev/src/dehub.git"
"dehub.dev/src/dehub.git/cmd/dehub/dcmd"
)
// cmdInit implements the "init" sub-command, which initializes a new dehub
// project (optionally bare and/or remote-capable) at a given path.
func cmdInit(ctx context.Context, cmd *dcmd.Cmd) {
	flag := cmd.FlagSet()
	path := flag.String("path", ".", "Path to initialize the project at")
	bare := flag.Bool("bare", false, "Initialize the git repo as a bare repository")
	remote := flag.Bool("remote", false, "Configure the git repo to allow it to be used as a remote endpoint")
	cmd.Run(func() (context.Context, error) {
		// the returned project itself isn't needed here, only whether
		// initialization succeeded.
		if _, err := dehub.InitProject(*path,
			dehub.InitBareRepo(*bare),
			dehub.InitRemoteRepo(*remote),
		); err != nil {
			return nil, fmt.Errorf("initializing repo at %q: %w", *path, err)
		}
		return nil, nil
	})
}

@ -0,0 +1,75 @@
package main
import (
"errors"
"flag"
"fmt"
"os"
"dehub.dev/src/dehub.git"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// proj wraps a dehub.Project together with the command-line state needed to
// open it, so sub-commands can share flag registration and opening logic.
type proj struct {
	bare bool // set by the -bare flag (see initFlags); passed to OpenBareRepo
	*dehub.Project
}
// initFlags registers proj's command-line flags onto the given flag set.
func (proj *proj) initFlags(flag *flag.FlagSet) {
	const bareDescr = "If set then the project being opened will be expected to have a bare git repo"
	flag.BoolVar(&proj.bare, "bare", false, bareDescr)
}
// openProj opens the dehub project in the current directory, respecting the
// -bare flag, and stores the result on the receiver.
func (proj *proj) openProj() error {
	opened, err := dehub.OpenProject(".", dehub.OpenBareRepo(proj.bare))
	if err != nil {
		// best-effort working directory lookup, purely for the error message.
		wd, _ := os.Getwd()
		return fmt.Errorf("opening repo at %q: %w", wd, err)
	}
	proj.Project = opened
	return nil
}
// softReset resets to HEAD^ (or to an orphaned index, if HEAD has no parents),
// returning the old HEAD.
//
// If expType is non-empty, softReset first asserts that the payload of HEAD
// has that type, erroring out otherwise.
func (proj *proj) softReset(expType string) (dehub.Commit, error) {
	head, err := proj.GetHeadCommit()
	if err != nil {
		return head, fmt.Errorf("getting HEAD commit: %w", err)
	} else if typ := head.Payload.Type(); expType != "" && typ != expType {
		return head, fmt.Errorf("expected HEAD to be have a %q payload, but found a %q payload",
			expType, typ)
	}
	branchName, branchErr := proj.ReferenceToBranchName(plumbing.HEAD)
	numParents := head.Object.NumParents()
	if numParents > 1 {
		return head, errors.New("cannot reset to parent of a commit with multiple parents")
	} else if numParents == 0 {
		// if there are no parents then HEAD is the only commit in the branch.
		// Don't handle ErrNoBranchReference because there's not really anything
		// which can be done for that; we can't set head to "no commit".
		// Otherwise, just remove the branch reference, HEAD will still point to
		// it and all of HEAD's changes will be in the index.
		if branchErr != nil {
			return head, branchErr
		} else if err := proj.GitRepo.Storer.RemoveReference(branchName); err != nil {
			return head, fmt.Errorf("removing reference %q: %w", branchName, err)
		}
		return head, nil
	}
	refName := branchName
	if errors.Is(branchErr, dehub.ErrNoBranchReference) {
		// detached HEAD: reset the HEAD reference itself, not a branch.
		refName = plumbing.HEAD
	} else if branchErr != nil {
		// BUG FIX: this previously tested `err != nil`, but err is always nil
		// by this point (checked above), so any branch-resolution failure
		// other than ErrNoBranchReference was silently ignored and a zero
		// refName was used. Test branchErr instead.
		return head, fmt.Errorf("resolving HEAD: %w", branchErr)
	}
	// point the resolved reference at HEAD's sole parent; HEAD's changes
	// remain in the index.
	parentHash := head.Object.ParentHashes[0]
	newHeadRef := plumbing.NewHashReference(refName, parentHash)
	if err := proj.GitRepo.Storer.SetReference(newHeadRef); err != nil {
		return head, fmt.Errorf("storing reference %q: %w", newHeadRef, err)
	}
	return head, nil
}

@ -0,0 +1,48 @@
package main
import (
"context"
"fmt"
"dehub.dev/src/dehub.git"
"dehub.dev/src/dehub.git/cmd/dehub/dcmd"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// cmdVerify implements the "verify" sub-command, which verifies a single
// commit (HEAD by default) against the branch it belongs to.
func cmdVerify(ctx context.Context, cmd *dcmd.Cmd) {
	flag := cmd.FlagSet()
	rev := flag.String("rev", "HEAD", "Revision of commit to verify")
	branch := flag.String("branch", "", "Branch that the revision is on. If not given then the currently checked out branch is assumed")
	var proj proj
	proj.initFlags(flag)
	cmd.Run(func() (context.Context, error) {
		if err := proj.openProj(); err != nil {
			return nil, err
		}
		commit, err := proj.GetCommitByRevision(plumbing.Revision(*rev))
		if err != nil {
			return nil, fmt.Errorf("resolving revision %q: %w", *rev, err)
		}
		// figure out which branch the commit should be verified against:
		// either the one given explicitly, or whichever HEAD points at.
		branchName := plumbing.NewBranchReferenceName(*branch)
		if *branch == "" {
			branchName, err = proj.ReferenceToBranchName(plumbing.HEAD)
			if err != nil {
				return nil, fmt.Errorf("determining branch at HEAD: %w", err)
			}
		}
		verifyErr := proj.VerifyCommits(branchName, []dehub.Commit{commit})
		if verifyErr != nil {
			return nil, fmt.Errorf("could not verify commit at %q (%s): %w",
				*rev, commit.Hash, verifyErr)
		}
		fmt.Printf("commit at %q (%s) is good to go!\n", *rev, commit.Hash)
		return nil, nil
	})
}

@ -0,0 +1,191 @@
// Package dcmd implements command and sub-command parsing and runtime
// management. It wraps the stdlib flag package as well, to incorporate
// configuration into the mix.
package dcmd
import (
"context"
"errors"
"flag"
"fmt"
"os"
"sort"
"strings"
)
// exitErr prints err to stderr, flushes both standard streams, and terminates
// the process with a non-zero exit code. It never returns.
func exitErr(err error) {
	stderr, stdout := os.Stderr, os.Stdout
	fmt.Fprintf(stderr, "exiting: %v\n", err)
	stderr.Sync()
	stdout.Sync()
	os.Exit(1)
}
// subCmd holds a registered sub-command: its name, a one-line description for
// usage output, and the callback which performs it (see Cmd.SubCmd).
type subCmd struct {
	name, descr string
	run         func(context.Context, *Cmd)
}
// Cmd wraps a flag.FlagSet instance to provide extra functionality that dehub
// wants, specifically around sub-command support.
type Cmd struct {
	flagSet *flag.FlagSet // lazily created; always access via getFlagSet
	binary  string        // only gets set on root Cmd, during Run
	subCmds []subCmd      // registered sub-commands, kept sorted by name
	// these fields get set by the parent Cmd, if this is a sub-command.
	name   string
	args   []string
	parent *Cmd
}
// New initializes and returns an empty Cmd instance.
func New() *Cmd {
	cmd := new(Cmd)
	return cmd
}
// getFlagSet lazily instantiates the underlying flag.FlagSet on first use and
// returns the same instance on every subsequent call.
func (cmd *Cmd) getFlagSet() *flag.FlagSet {
	if cmd.flagSet != nil {
		return cmd.flagSet
	}
	cmd.flagSet = flag.NewFlagSet(cmd.name, flag.ContinueOnError)
	return cmd.flagSet
}
// numFlags reports how many flags have been defined on this Cmd's flag set.
func (cmd *Cmd) numFlags() int {
	count := 0
	cmd.getFlagSet().VisitAll(func(_ *flag.Flag) { count++ })
	return count
}
// FlagSet returns a flag.Cmd instance on which parameter creation methods can
// be called, e.g. String(...) or Int(...).
func (cmd *Cmd) FlagSet() *flag.FlagSet {
	fs := cmd.getFlagSet()
	return fs
}
// SubCmd registers a sub-command of this Cmd.
//
// A new Cmd will be instantiated when this sub-command is picked on the
// command-line during this Cmd's Run method. The Context returned from that Run
// and the new Cmd will be passed into the callback given here. The sub-command
// should then be performed in the same manner as this Cmd is performed
// (including setting flags, adding sub-sub-commands, etc...)
func (cmd *Cmd) SubCmd(name, descr string, run func(context.Context, *Cmd)) {
	entry := subCmd{name: name, descr: descr, run: run}
	cmd.subCmds = append(cmd.subCmds, entry)
	// re-sorting on every registration isn't the most efficient approach,
	// but it is the easiest.
	sort.Slice(cmd.subCmds, func(i, j int) bool {
		return cmd.subCmds[i].name < cmd.subCmds[j].name
	})
}
// printUsageHead prints the usage title for this Cmd, recursing up through
// parent Cmds so the full command path appears, then prints this Cmd's flag
// defaults. subCmdTitle, when non-empty, is the already-rendered title of the
// child Cmd which triggered the recursion.
func (cmd *Cmd) printUsageHead(subCmdTitle string) {
	hasFlags := cmd.numFlags() > 0
	var title string
	if cmd.parent == nil {
		title = fmt.Sprintf("USAGE: %s", cmd.binary)
		if hasFlags {
			title += " [flags]"
		}
	} else {
		// IDIOM FIX: was fmt.Sprintf("%s", cmd.name) — a no-op Sprintf of a
		// string (staticcheck S1025).
		title = cmd.name
		if hasFlags {
			title += fmt.Sprintf(" [%s flags]", cmd.name)
		}
	}
	if subCmdTitle != "" {
		title += " " + subCmdTitle
	} else if len(cmd.subCmds) > 0 {
		// IDIOM FIX: was fmt.Sprint of a constant string; plain concatenation
		// is equivalent.
		title += " <sub-command> [sub-command flags]"
	}
	if cmd.parent == nil {
		// the root prints the fully-assembled title; children delegate upward.
		fmt.Printf("\n%s\n\n", title)
	} else {
		cmd.parent.printUsageHead(title)
	}
	if hasFlags {
		if cmd.parent == nil {
			fmt.Print("### FLAGS ###\n\n")
		} else {
			fmt.Printf("### %s FLAGS ###\n\n", strings.ToUpper(cmd.name))
		}
		cmd.getFlagSet().PrintDefaults()
		fmt.Print("\n")
	}
}
// Run performs the command. It starts by parsing all flags in the Cmd's FlagSet,
// and possibly exiting with a usage message if appropriate. It will then
// perform the given body callback, and then perform any sub-commands (if
// selected).
//
// The context returned from the callback will be passed into the callback
// (given to SubCmd) of any sub-commands which are run, and so on.
func (cmd *Cmd) Run(body func() (context.Context, error)) {
	args := cmd.args
	if cmd.parent == nil {
		// the root Cmd takes its name and arguments directly from the
		// process's command-line; sub-commands have them set by their parent.
		cmd.binary, args = os.Args[0], os.Args[1:]
	}
	fs := cmd.getFlagSet()
	fs.Usage = func() {
		cmd.printUsageHead("")
		if len(cmd.subCmds) == 0 {
			return
		}
		fmt.Printf("### SUB-COMMANDS ###\n\n")
		for _, subCmd := range cmd.subCmds {
			fmt.Printf("\t%s : %s\n", subCmd.name, subCmd.descr)
		}
		fmt.Println("")
	}
	if err := fs.Parse(args); err != nil {
		exitErr(err)
		return
	}
	ctx, err := body()
	if err != nil {
		// exitErr calls os.Exit and does not return, so no explicit return is
		// needed after it here (or below).
		exitErr(err)
	}
	// body has run, now do sub-command (if there is one)
	subArgs := fs.Args()
	if len(cmd.subCmds) == 0 {
		return
	} else if len(subArgs) == 0 && len(cmd.subCmds) > 0 {
		fs.Usage()
		exitErr(errors.New("no sub-command selected"))
	}
	// now find that sub-command
	subCmdName := strings.ToLower(subArgs[0])
	var subCmd subCmd
	var subCmdOk bool
	for _, subCmd = range cmd.subCmds {
		if subCmdOk = subCmd.name == subCmdName; subCmdOk {
			break
		}
	}
	if !subCmdOk {
		fs.Usage()
		exitErr(fmt.Errorf("unknown command %q", subCmdName))
	}
	// hand off to the selected sub-command, threading through the context
	// returned by body and the remaining arguments.
	subCmdCmd := New()
	subCmdCmd.name = subCmd.name
	subCmdCmd.args = subArgs[1:]
	subCmdCmd.parent = cmd
	subCmd.run(ctx, subCmdCmd)
}

@ -1,136 +1,20 @@
package main
import (
"dehub"
"errors"
"flag"
"fmt"
"os"
"strings"
"context"
"gopkg.in/src-d/go-git.v4/plumbing"
"dehub.dev/src/dehub.git/cmd/dehub/dcmd"
)
// subCmdCtx bundles the state handed to each sub-command body: the already
// opened repo and the remaining command-line arguments for the sub-command.
type subCmdCtx struct {
	repo *dehub.Repo
	args []string
}
// subCmds enumerates the available sub-commands: each entry pairs a name and
// a one-line description (shown by printHelp) with the body implementing it.
var subCmds = []struct {
	name, descr string
	body        func(sctx subCmdCtx) error
}{
	{
		name:  "commit",
		descr: "commits staged changes to the head of the current branch",
		body: func(sctx subCmdCtx) error {
			flag := flag.NewFlagSet("commit", flag.ExitOnError)
			msg := flag.String("msg", "", "Commit message to use")
			accountID := flag.String("account-id", "", "Account to sign commit as")
			flag.Parse(sctx.args)
			if *msg == "" || *accountID == "" {
				return errors.New("-msg and -account-id are both required")
			}
			cfg, err := sctx.repo.LoadConfig()
			if err != nil {
				return err
			}
			// linear scan for the account matching -account-id in the config.
			var account dehub.Account
			var ok bool
			for _, account = range cfg.Accounts {
				if account.ID == *accountID {
					ok = true
					break
				}
			}
			if !ok {
				return fmt.Errorf("account ID %q not found in config", *accountID)
			} else if l := len(account.Signifiers); l == 0 || l > 1 {
				// only single-signifier accounts are supported here.
				return fmt.Errorf("account %q has %d signifiers, only one is supported right now", *accountID, l)
			}
			// resolve the account's sole signifier into its interface form and
			// use it to sign the commit to master.
			sig := account.Signifiers[0]
			sigInt, err := sig.Interface()
			if err != nil {
				return fmt.Errorf("could not cast %+v to SignifierInterface: %w", sig, err)
			}
			_, hash, err := sctx.repo.CommitMaster(*msg, *accountID, sigInt)
			if err != nil {
				return err
			}
			fmt.Printf("changes committed to HEAD as %s\n", hash)
			return nil
		},
	},
	{
		name:  "verify",
		descr: "verifies one or more commits as having the proper credentials",
		body: func(sctx subCmdCtx) error {
			flag := flag.NewFlagSet("verify", flag.ExitOnError)
			rev := flag.String("rev", "HEAD", "Revision of commit to verify")
			flag.Parse(sctx.args)
			h, err := sctx.repo.GitRepo.ResolveRevision(plumbing.Revision(*rev))
			if err != nil {
				return fmt.Errorf("could not resolve revision %q: %w", *rev, err)
			}
			if err := sctx.repo.VerifyMasterCommit(*h); err != nil {
				return fmt.Errorf("could not verify commit at %q (%s): %w", *rev, *h, err)
			}
			fmt.Printf("commit at %q (%s) is good to go!\n", *rev, *h)
			return nil
		},
	},
}
// printHelp writes the top-level usage message, including the list of
// registered sub-commands, to stdout.
func printHelp() {
	fmt.Printf("USAGE: %s <command> [-h]\n\n", os.Args[0])
	fmt.Println("COMMANDS")
	for _, sc := range subCmds {
		fmt.Printf("\t%s : %s\n", sc.name, sc.descr)
	}
}
// exitErr prints err to stderr, flushes both standard streams, and terminates
// the process with exit code 1. It never returns.
func exitErr(err error) {
	stderr, stdout := os.Stderr, os.Stdout
	fmt.Fprintf(stderr, "exiting: %v\n", err)
	stderr.Sync()
	stdout.Sync()
	os.Exit(1)
}
// NOTE(review): this function appears to be diff residue — the body of the
// old sub-command-dispatch main and the body of the new dcmd-based main have
// been fused into a single function. The old dispatch loop below returns on
// any matched sub-command, so the dcmd setup at the bottom only executes when
// no legacy sub-command matched — almost certainly not the intent of either
// version. TODO: keep exactly one of the two bodies.
func main() {
	if len(os.Args) < 2 {
		printHelp()
		return
	}
	// --- old-style dispatch over the subCmds table ---
	subCmdName := strings.ToLower(os.Args[1])
	for _, subCmd := range subCmds {
		if subCmd.name != subCmdName {
			continue
		}
		// open the repo in the current directory and run the matched
		// sub-command against it, exiting on any error.
		r, err := dehub.OpenRepo(".")
		if err != nil {
			exitErr(err)
		}
		err = subCmd.body(subCmdCtx{
			repo: r,
			args: os.Args[2:],
		})
		if err != nil {
			exitErr(err)
		}
		return
	}
	fmt.Printf("unknown command %q\n\n", subCmdName)
	printHelp()
	// --- new-style dcmd setup: register all sub-commands and run with a
	// fresh background context ---
	cmd := dcmd.New()
	cmd.SubCmd("init", "Initialize a new project in a directory", cmdInit)
	cmd.SubCmd("commit", "Commits staged changes to the head of the current branch", cmdCommit)
	cmd.SubCmd("verify", "Verifies one or more commits as having the proper credentials", cmdVerify)
	cmd.SubCmd("hook", "Use dehub as a git hook", cmdHook)
	cmd.SubCmd("combine", "Combine multiple change and credential commits into a single commit", cmdCombine)
	cmd.Run(func() (context.Context, error) {
		return context.Background(), nil
	})
}

@ -0,0 +1,72 @@
package main
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strings"
)
// defaultCommitFileMsgTpl is the fmt template seeded into the temp file which
// is handed to the user's EDITOR by tmpFileMsg; the leading %s is filled with
// a previous/default message, and '#'-prefixed lines are stripped from the
// user's result.
const defaultCommitFileMsgTpl = `%s
# Please enter the description for your commit(s). Lines starting with '#' will
# be ignored, and an empty message aborts the commit.`
// tmpFileMsg collects a message from the user by opening their EDITOR on a
// temporary file seeded with fmt.Sprintf(tpl, args...). Lines whose trimmed
// form starts with '#' are stripped from the result, and the remainder is
// returned with surrounding whitespace trimmed. An error is returned if
// EDITOR is unset or cannot be found, or if any file/editor step fails.
func tmpFileMsg(tpl string, args ...interface{}) (string, error) {
	editor := os.Getenv("EDITOR")
	if editor == "" {
		return "", errors.New("EDITOR not set, please set it or use -msg in order to create your commit message")
	}
	// BUG FIX: this previously os.Stat'd the EDITOR value directly, which
	// rejected bare command names like "vim" that are only resolvable via
	// $PATH. exec.LookPath handles both absolute paths and PATH lookups.
	editorPath, err := exec.LookPath(editor)
	if err != nil {
		return "", fmt.Errorf("could not find EDITOR %q: %w", editor, err)
	}
	tmpf, err := ioutil.TempFile("", "dehub.*.txt")
	if err != nil {
		return "", fmt.Errorf("could not open temp file: %w", err)
	}
	tmpfName := tmpf.Name()
	defer os.Remove(tmpfName)
	// write the seed template, closing before the editor opens the file.
	tmpBody := bytes.NewBufferString(fmt.Sprintf(tpl, args...))
	_, err = io.Copy(tmpf, tmpBody)
	tmpf.Close()
	if err != nil {
		return "", fmt.Errorf("could not write helper message to temp file %q: %w", tmpfName, err)
	}
	// run the editor interactively, attached to this process's terminal.
	cmd := exec.Command(editorPath, tmpfName)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return "", fmt.Errorf("error running '%s %q': %w", editor, tmpfName, err)
	}
	body, err := ioutil.ReadFile(tmpfName)
	if err != nil {
		return "", fmt.Errorf("error retrieving message body from %q: %w", tmpfName, err)
	}
	// filter out comment lines ('#'-prefixed after trimming whitespace).
	bodyFiltered := new(bytes.Buffer)
	bodyBR := bufio.NewReader(bytes.NewBuffer(body))
	for {
		line, err := bodyBR.ReadString('\n')
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			return "", fmt.Errorf("error reading from buffered body: %w", err)
		}
		if !strings.HasPrefix(strings.TrimSpace(line), "#") {
			bodyFiltered.WriteString(line)
		}
	}
	return strings.TrimSpace(bodyFiltered.String()), nil
}

@ -0,0 +1,27 @@
# git-http-server
A simple HTTP server which serves files directly out of a git repo (bare or
otherwise), using the repo itself as the underlying filesystem.
* Automatically renders markdown files as html.
* Will use `README.md` as the index, if available.
* Can be set to use a specific branch.
All configuration is done on the command-line.
# Installation
Installation of git-http-server is done in the same manner as the `dehub`
command itself:
```
go get dehub.dev/src/dehub.git/cmd/git-http-server
```
# Markdown
TODO
# Templates
TODO

@ -0,0 +1,11 @@
module dehub/cmd/git-http-server
go 1.14
require (
github.com/gomarkdown/markdown v0.0.0-20200513213024-62c5e2c608cc
golang.org/x/crypto v0.0.0-20200109152110-61a87790db17 // indirect
gopkg.in/src-d/go-git.v4 v4.13.1
)
replace dehub => ../../

@ -0,0 +1,79 @@
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/gomarkdown/markdown v0.0.0-20200513213024-62c5e2c608cc h1:T+Fwk3llJdUIQeBI8fC/ARqRD5mWy3AE5I6ZU3VkIw8=
github.com/gomarkdown/markdown v0.0.0-20200513213024-62c5e2c608cc/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200109152110-61a87790db17 h1:nVJ3guKA9qdkEQ3TUdXI9QSINo2CUPM/cySEvw2w8I0=
golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e h1:D5TXcfTk7xF7hvieo4QErS3qqCB4teTffacDWr7CI+0=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE=
gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=

@ -0,0 +1,154 @@
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/gomarkdown/markdown"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
)
// handler serves files out of a git repository over HTTP.
type handler struct {
	repo   *git.Repository        // repo whose trees files are read from
	branch plumbing.ReferenceName // default revision when the request has no "rev" (see getTree)
	tpl    *template.Template     // optional wrapper for rendered markdown; may be nil
}
// getTree resolves the request's "rev" form value (falling back to the
// handler's configured branch) to the git tree at that revision. On error it
// also returns the HTTP status code which should be sent to the client.
func (h handler) getTree(r *http.Request) (*object.Tree, int, error) {
	rev := plumbing.Revision(r.FormValue("rev"))
	if rev == "" {
		rev = plumbing.Revision(h.branch)
	}
	hashPtr, err := h.repo.ResolveRevision(rev)
	if err != nil {
		// IDIOM FIX: use named status constants rather than magic numbers.
		return nil, http.StatusNotFound, fmt.Errorf("resolving revision %q: %w", rev, err)
	}
	hash := *hashPtr // I don't know why ResolveRevision returns a pointer
	commit, err := h.repo.CommitObject(hash)
	if err != nil {
		return nil, http.StatusNotFound, fmt.Errorf("retrieving commit for revision %q (%q): %w",
			rev, hash, err)
	}
	tree, err := h.repo.TreeObject(commit.TreeHash)
	if err != nil {
		// BUG FIX: this branch previously wrapped with %v, dropping the error
		// chain; use %w for consistency with the branches above.
		return nil, http.StatusInternalServerError, fmt.Errorf("fetching tree %q of commit %q: %w",
			commit.TreeHash, hash, err)
	}
	return tree, 0, nil
}
// ServeHTTP implements http.Handler. It serves the file at the request path
// from the tree resolved by getTree. Directory requests and .html requests
// fall back to a corresponding markdown file (README.md for a directory
// index), which is rendered to HTML and, if a template is configured, wrapped
// in it.
func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	path := r.URL.Path
	// compute the markdown fallback path alongside the normalized html path.
	var mdPath string
	if strings.HasSuffix(path, "/") {
		mdPath = filepath.Join(path, "README.md") // do before modifying path
		path = filepath.Join(path, "index.html")
	} else if strings.HasSuffix(path, "/index.html") {
		mdPath = filepath.Join(filepath.Dir(path), "README.md")
	} else if filepath.Ext(path) == ".html" {
		mdPath = strings.TrimSuffix(path, ".html") + ".md"
	}
	// git tree paths are relative, so strip the URL's leading slash.
	path = strings.TrimPrefix(path, "/")
	mdPath = strings.TrimPrefix(mdPath, "/")
	tree, errStatusCode, err := h.getTree(r)
	if err != nil {
		http.Error(rw, err.Error(), errStatusCode)
		return
	}
	// prefer the literal path; fall back to the markdown path if not found.
	var usingMD bool
	f, err := tree.File(path)
	if errors.Is(err, object.ErrFileNotFound) {
		usingMD = true
		f, err = tree.File(mdPath)
	}
	if errors.Is(err, object.ErrFileNotFound) {
		// IDIOM FIX: named status constants instead of magic 404/500 numbers.
		http.Error(rw, fmt.Sprintf("%q not found", path), http.StatusNotFound)
		return
	} else if err != nil {
		log.Printf("fetching file %q / %q: %v", path, mdPath, err)
		http.Error(rw, "internal error", http.StatusInternalServerError)
		return
	}
	fr, err := f.Blob.Reader()
	if err != nil {
		log.Printf("getting reader of file %q: %v", f.Name, err)
		http.Error(rw, "internal error", http.StatusInternalServerError)
		return
	}
	defer fr.Close()
	b, err := ioutil.ReadAll(fr)
	if err != nil {
		log.Printf("reading in contents of file %q: %v", f.Name, err)
		http.Error(rw, "internal error", http.StatusInternalServerError)
		return
	}
	if !usingMD {
		http.ServeContent(rw, r, filepath.Base(path), time.Now(), bytes.NewReader(b))
		return
	}
	mdHTML := markdown.ToHTML(b, nil, nil)
	if h.tpl == nil {
		http.ServeContent(rw, r, filepath.Base(path), time.Now(), bytes.NewReader(mdHTML))
		return
	}
	// BUG FIX: the template execution error was previously discarded; log it
	// so failed/partial renders aren't silent.
	if err := h.tpl.Execute(rw, struct {
		Body string
	}{string(mdHTML)}); err != nil {
		log.Printf("executing template for %q: %v", path, err)
	}
}
// main parses command-line configuration, opens the target git repo (and the
// optional markdown template), then serves the repo's files over HTTP.
func main() {
	addr := flag.String("addr", ":8000", "Address to listen for http requests on")
	branchName := flag.String("branch", "master", "git branch to serve the HEAD of")
	repoPath := flag.String("repo-path", ".", "Path to the git repository to server")
	tplPath := flag.String("tpl-path", "", "Path to an optional template file which can be used when rendering markdown")
	flag.Parse()
	repo, err := git.PlainOpen(*repoPath)
	if err != nil {
		log.Fatalf("opening git repo at path %q: %v", *repoPath, err)
	}
	branch := plumbing.NewBranchReferenceName(*branchName)
	// do an initial check for the branch, for funsies
	if _, err := repo.Reference(branch, true); err != nil {
		log.Fatalf("resolving reference %q: %v", branch, err)
	}
	h := &handler{
		repo:   repo,
		branch: branch,
	}
	if *tplPath != "" {
		h.tpl = template.Must(template.ParseFiles(*tplPath))
	}
	log.Printf("listening on %q", *addr)
	// BUG FIX: the error from ListenAndServe was previously discarded, so a
	// failure to bind the address exited silently with status 0.
	log.Fatal(http.ListenAndServe(*addr, h))
}

@ -1,251 +1,222 @@
package dehub
import (
"bytes"
"dehub/accessctl"
"dehub/fs"
"dehub/sigcred"
"dehub/yamlutil"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"path/filepath"
"strings"
"time"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
yaml "gopkg.in/yaml.v2"
)
// MasterCommit describes the structure of the object encoded into the git
// message of a commit in the master branch.
type MasterCommit struct {
// Message is the human-readable commit message supplied by the committer.
Message string `yaml:"message"`
// ChangeHash is expected to match the hash generated from the commit's
// message and tree diff (see VerifyMasterCommit, which recomputes it via
// genChangeHash and compares).
ChangeHash yamlutil.Blob `yaml:"change_hash"`
// Credentials accredit the commit, e.g. cryptographic signatures over
// ChangeHash; they are checked against the project's access controls.
Credentials []sigcred.Credential `yaml:"credentials"`
}
// mcYAML wraps MasterCommit so that its fields are (un)marshaled as an inline
// YAML object at the top level, rather than nested under a "val" key.
type mcYAML struct {
Val MasterCommit `yaml:",inline"`
}
// Commit wraps a single git commit object, and also contains various fields
// which are parsed out of it, including the payload. It is used as a
// convenience type, in place of having to manually retrieve and parse specific
// information out of commit objects.
type Commit struct {
Payload PayloadUnion
func msgHead(msg string) string {
i := strings.Index(msg, "\n")
if i > 0 {
return msg[:i]
}
return msg
Hash plumbing.Hash
Object *object.Commit
TreeObject *object.Tree
}
// MarshalText implements the encoding.TextMarshaler interface by returning the
// form the MasterCommit object takes in the git commit message.
func (mc MasterCommit) MarshalText() ([]byte, error) {
masterCommitEncoded, err := yaml.Marshal(mcYAML{mc})
if err != nil {
return nil, fmt.Errorf("failed to encode MasterCommit message: %w", err)
}
fullMsg := msgHead(mc.Message) + "\n\n" + string(masterCommitEncoded)
return []byte(fullMsg), nil
// GetCommit retrieves the Commit at the given hash, and all of its sub-data
// which can be pulled out of it.
func (proj *Project) GetCommit(h plumbing.Hash) (c Commit, err error) {
	// Fetch the raw commit object first; everything else hangs off of it.
	c.Object, err = proj.GitRepo.CommitObject(h)
	if err != nil {
		return c, fmt.Errorf("getting git commit object: %w", err)
	}

	// Pull in the tree the commit points at.
	c.TreeObject, err = proj.GitRepo.TreeObject(c.Object.TreeHash)
	if err != nil {
		return c, fmt.Errorf("getting git tree object %q: %w",
			c.Object.TreeHash, err)
	}

	// The commit message carries the encoded payload.
	if err = c.Payload.UnmarshalText([]byte(c.Object.Message)); err != nil {
		return c, fmt.Errorf("decoding commit message: %w", err)
	}

	c.Hash = c.Object.Hash
	return c, nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface by decoding a
// MasterCommit object which has been encoded into a git commit message.
func (mc *MasterCommit) UnmarshalText(msg []byte) error {
i := bytes.Index(msg, []byte("\n"))
if i < 0 {
return fmt.Errorf("commit message %q is malformed", msg)
}
msgHead, msg := msg[:i], msg[i:]
// ErrHeadIsZero is used to indicate that HEAD resolves to the zero hash. An
// example of when this can happen is if the project was just initialized and
// has no commits, or if an orphan branch is checked out.
var ErrHeadIsZero = errors.New("HEAD resolves to the zero hash")
var mcy mcYAML
if err := yaml.Unmarshal(msg, &mcy); err != nil {
return fmt.Errorf("could not unmarshal MasterCommit message: %w", err)
}
*mc = mcy.Val
if !strings.HasPrefix(mc.Message, string(msgHead)) {
return errors.New("encoded MasterCommit is malformed, it might not be an encoded MasterCommit")
}
return nil
}
// CommitMaster constructs a MasterCommit using the given SignifierInterface to
// create a Credential for it. It returns the commit's hash after having set it
// to HEAD.
//
// TODO this method is a prototype and does not reflect the method's final form.
func (r *Repo) CommitMaster(msg, accountID string, sig sigcred.SignifierInterface) (MasterCommit, plumbing.Hash, error) {
_, headTree, err := r.head()
if errors.Is(err, plumbing.ErrReferenceNotFound) {
headTree = &object.Tree{}
} else if err != nil {
return MasterCommit{}, plumbing.ZeroHash, err
}
_, stagedTree, err := fs.FromStagedChangesTree(r.GitRepo)
// GetHeadCommit returns the Commit which is currently referenced by HEAD.
// This method may return ErrHeadIsZero if HEAD resolves to the zero hash.
func (proj *Project) GetHeadCommit() (Commit, error) {
headHash, err := proj.ReferenceToHash(plumbing.HEAD)
if err != nil {
return MasterCommit{}, plumbing.ZeroHash, err
return Commit{}, fmt.Errorf("resolving HEAD: %w", err)
} else if headHash == plumbing.ZeroHash {
return Commit{}, ErrHeadIsZero
}
// this is necessarily different than headTree for the case of there being
// no HEAD (ie it's the first commit). In that case we want headTree to be
// empty (because it's being used to generate the change hash), but we want
// the signifier to use the raw fs (because that's where the signifier's
// data might be).
sigFS, err := r.headOrRawFS()
c, err := proj.GetCommit(headHash)
if err != nil {
return MasterCommit{}, plumbing.ZeroHash, err
}
cfg, err := r.loadConfig(sigFS)
if err != nil {
return MasterCommit{}, plumbing.ZeroHash, fmt.Errorf("could not load config: %w", err)
return Commit{}, fmt.Errorf("getting commit %q: %w", headHash, err)
}
return c, nil
}
changeHash := genChangeHash(nil, msg, headTree, stagedTree)
cred, err := sig.Sign(sigFS, changeHash)
// GetCommitRange returns an ancestry of Commits, with the first being the
// commit immediately following the given starting hash, and the last being the
// given ending hash.
//
// If start is plumbing.ZeroHash then the root commit will be the starting hash.
func (proj *Project) GetCommitRange(start, end plumbing.Hash) ([]Commit, error) {
curr, err := proj.GetCommit(end)
if err != nil {
return MasterCommit{}, plumbing.ZeroHash, fmt.Errorf("failed to sign commit hash: %w", err)
return nil, fmt.Errorf("retrieving commit %q: %w", end, err)
}
cred.AccountID = accountID
// This isn't strictly necessary, but we want to save people the effort of
// creating an invalid commit, pushing it, having it be rejected, then
// having to reset on the commit.
err = r.assertAccessControls(
cfg.AccessControls, []sigcred.Credential{cred},
headTree, stagedTree,
)
if err != nil {
return MasterCommit{}, plumbing.ZeroHash, fmt.Errorf("commit would not satisfy access controls: %w", err)
}
var commits []Commit
var found bool
for {
if found = start != plumbing.ZeroHash && curr.Hash == start; found {
break
}
masterCommit := MasterCommit{
Message: msg,
ChangeHash: changeHash,
Credentials: []sigcred.Credential{cred},
}
commits = append(commits, curr)
numParents := curr.Object.NumParents()
if numParents == 0 {
break
} else if numParents > 1 {
return nil, fmt.Errorf("commit %q has more than one parent: %+v",
curr.Hash, curr.Object.ParentHashes)
}
masterCommitB, err := masterCommit.MarshalText()
if err != nil {
return masterCommit, plumbing.ZeroHash, err
parentHash := curr.Object.ParentHashes[0]
parent, err := proj.GetCommit(parentHash)
if err != nil {
return nil, fmt.Errorf("retrieving commit %q: %w", parentHash, err)
}
curr = parent
}
w, err := r.GitRepo.Worktree()
if err != nil {
return masterCommit, plumbing.ZeroHash, fmt.Errorf("could not get git worktree: %w", err)
if !found && start != plumbing.ZeroHash {
return nil, fmt.Errorf("unable to find commit %q as an ancestor of %q",
start, end)
}
hash, err := w.Commit(string(masterCommitB), &git.CommitOptions{
Author: &object.Signature{
Name: accountID,
When: time.Now(),
},
})
if err != nil {
return masterCommit, hash, fmt.Errorf("failed to commit changed: %w", err)
// reverse the commits to be in the expected order
for l, r := 0, len(commits)-1; l < r; l, r = l+1, r-1 {
commits[l], commits[r] = commits[r], commits[l]
}
return masterCommit, hash, nil
return commits, nil
}
func (r *Repo) assertAccessControls(
accessCtls []accessctl.AccessControl, creds []sigcred.Credential,
from, to *object.Tree,
) error {
filesChanged, err := calcDiff(from, to)
if err != nil {
return err
}
var (
hashStrLen = len(plumbing.ZeroHash.String())
errNotHex = errors.New("not a valid hex string")
)
pathsChanged := make([]string, len(filesChanged))
for i := range filesChanged {
pathsChanged[i] = filesChanged[i].path
func (proj *Project) findCommitByShortHash(hashStr string) (plumbing.Hash, error) {
paddedHashStr := hashStr
if len(hashStr)%2 > 0 {
paddedHashStr += "0"
}
accessCtls, err = accessctl.ApplicableAccessControls(accessCtls, pathsChanged)
if err != nil {
return fmt.Errorf("could not determine applicable access controls: %w", err)
if hashB, err := hex.DecodeString(paddedHashStr); err != nil {
return plumbing.ZeroHash, errNotHex
} else if len(hashStr) == hashStrLen {
var hash plumbing.Hash
copy(hash[:], hashB)
return hash, nil
} else if len(hashStr) < 2 {
return plumbing.ZeroHash, errors.New("hash string must be 2 characters long or more")
}
for _, accessCtl := range accessCtls {
condInt, err := accessCtl.Condition.Interface()
for i := 2; i < hashStrLen; i++ {
hashPrefix, hashTail := hashStr[:i], hashStr[i:]
path := filepath.Join("objects", hashPrefix)
fileInfos, err := proj.GitDirFS.ReadDir(path)
if err != nil {
return fmt.Errorf("could not cast Condition to interface: %w", err)
} else if err := condInt.Satisfied(creds); err != nil {
return fmt.Errorf("access control for pattern %q not satisfied: %w",
accessCtl.Pattern, err)
return plumbing.ZeroHash, fmt.Errorf("listing files in %q: %w", path, err)
}
}
return nil
}
// VerifyMasterCommit verifies that the commit at the given hash, which is
// presumably on the master branch, is gucci.
func (r *Repo) VerifyMasterCommit(h plumbing.Hash) error {
commit, err := r.GitRepo.CommitObject(h)
if err != nil {
return fmt.Errorf("could not retrieve commit object: %w", err)
}
var matchedHash plumbing.Hash
for _, fileInfo := range fileInfos {
objFileName := fileInfo.Name()
if !strings.HasPrefix(objFileName, hashTail) {
continue
}
objHash := plumbing.NewHash(hashPrefix + objFileName)
obj, err := proj.GitRepo.Storer.EncodedObject(plumbing.AnyObject, objHash)
if err != nil {
return plumbing.ZeroHash, fmt.Errorf("reading object %q off disk: %w", objHash, err)
} else if obj.Type() != plumbing.CommitObject {
continue
} else if matchedHash == plumbing.ZeroHash {
matchedHash = objHash
continue
}
return plumbing.ZeroHash, fmt.Errorf("both %q and %q match", matchedHash, objHash)
}
commitTree, err := r.GitRepo.TreeObject(commit.TreeHash)
if err != nil {
return fmt.Errorf("could not retrieve tree object: %w", err)
if matchedHash != plumbing.ZeroHash {
return matchedHash, nil
}
}
var masterCommit MasterCommit
if err := masterCommit.UnmarshalText([]byte(commit.Message)); err != nil {
return err
}
return plumbing.ZeroHash, errors.New("failed to find a commit object with a matching prefix")
}
sigTree := commitTree // only for root commit
parentTree := &object.Tree{}
if commit.NumParents() > 0 {
parent, err := commit.Parent(0)
if err != nil {
return fmt.Errorf("could not retrieve parent of commit: %w", err)
} else if parentTree, err = r.GitRepo.TreeObject(parent.TreeHash); err != nil {
return fmt.Errorf("could not retrieve tree object of parent %q: %w", parent.Hash, err)
func (proj *Project) resolveRev(rev plumbing.Revision) (plumbing.Hash, error) {
if rev == plumbing.Revision(plumbing.ZeroHash.String()) {
return plumbing.ZeroHash, nil
}
{
// pretend the revision is a short hash until proven otherwise
shortHash := string(rev)
hash, err := proj.findCommitByShortHash(shortHash)
if errors.Is(err, errNotHex) {
// ok, continue
} else if err != nil {
return plumbing.ZeroHash, fmt.Errorf("resolving as short hash: %w", err)
} else {
// guess it _is_ a short hash, knew it!
return hash, nil
}
sigTree = parentTree
}
sigFS := fs.FromTree(sigTree)
cfg, err := r.loadConfig(sigFS)
h, err := proj.GitRepo.ResolveRevision(rev)
if err != nil {
return fmt.Errorf("error loading config: %w", err)
return plumbing.ZeroHash, fmt.Errorf("resolving revision %q: %w", rev, err)
}
return *h, nil
}
err = r.assertAccessControls(
cfg.AccessControls, masterCommit.Credentials,
parentTree, commitTree,
)
// GetCommitByRevision resolves the revision and returns the Commit it references.
func (proj *Project) GetCommitByRevision(rev plumbing.Revision) (Commit, error) {
hash, err := proj.resolveRev(rev)
if err != nil {
return fmt.Errorf("failed to satisfy all access controls: %w", err)
return Commit{}, err
}
expectedChangeHash := genChangeHash(nil, masterCommit.Message, parentTree, commitTree)
if !bytes.Equal(masterCommit.ChangeHash, expectedChangeHash) {
return fmt.Errorf("malformed change_hash in commit body, is %s but should be %s",
base64.StdEncoding.EncodeToString(expectedChangeHash),
base64.StdEncoding.EncodeToString(masterCommit.ChangeHash))
c, err := proj.GetCommit(hash)
if err != nil {
return Commit{}, fmt.Errorf("getting commit %q: %w", hash, err)
}
return c, nil
}
for _, cred := range masterCommit.Credentials {
sig, err := r.signifierForCredential(sigFS, cred)
if err != nil {
return fmt.Errorf("error finding signifier for credential %+v: %w", cred, err)
} else if err := sig.Verify(sigFS, expectedChangeHash, cred); err != nil {
return fmt.Errorf("error verifying credential %+v: %w", cred, err)
}
// GetCommitRangeByRevision is like GetCommitRange, first resolving the given
// revisions into hashes before continuing with GetCommitRange's behavior.
//
// NOTE(review): the previous text of this function contained two stray lines
// of merge/diff residue — a leftover "TODO access controls" comment and an
// unreachable, ill-typed `return nil` — which are removed here.
func (proj *Project) GetCommitRangeByRevision(startRev, endRev plumbing.Revision) ([]Commit, error) {
	start, err := proj.resolveRev(startRev)
	if err != nil {
		return nil, err
	}

	end, err := proj.resolveRev(endRev)
	if err != nil {
		return nil, err
	}

	return proj.GetCommitRange(start, end)
}

@ -1,170 +0,0 @@
package dehub
import (
"dehub/accessctl"
"dehub/sigcred"
"errors"
"reflect"
"strings"
"testing"
"github.com/davecgh/go-spew/spew"
"gopkg.in/src-d/go-git.v4/plumbing"
yaml "gopkg.in/yaml.v2"
)
// TestMasterCommitVerify commits a sequence of staged tree changes via
// CommitMaster and asserts that each resulting commit passes
// VerifyMasterCommit, that its message starts with the expected head line,
// and that the MasterCommit decoded back out of the commit message matches
// the one CommitMaster returned.
func TestMasterCommitVerify(t *testing.T) {
// step is a single staged-change + commit cycle within a test case.
type step struct {
msg string
msgHead string // defaults to msg
// tree maps file path -> contents to stage; an empty string value appears
// to mean deletion (see the "removing b" step below) — TODO confirm
// against the harness's stage implementation.
tree map[string]string
}
testCases := []struct {
descr string
steps []step
}{
{
descr: "single commit",
steps: []step{
{
msg: "first commit",
tree: map[string]string{"a": "0", "b": "1"},
},
},
},
{
descr: "multiple commits",
steps: []step{
{
msg: "first commit",
tree: map[string]string{"a": "0", "b": "1"},
},
{
msg: "second commit, changing a",
tree: map[string]string{"a": "1"},
},
{
msg: "third commit, empty",
},
{
msg: "fourth commit, adding c, removing b",
tree: map[string]string{"b": "", "c": "2"},
},
},
},
{
// These cases exercise how trailing/embedded newlines in the message
// affect the generated commit message head.
descr: "big body commits",
steps: []step{
{
msg: "first commit, single line but with newline\n",
},
{
msg: "second commit, single line but with two newlines\n\n",
msgHead: "second commit, single line but with two newlines\n\n",
},
{
msg: "third commit, multi-line with one newline\nanother line!",
msgHead: "third commit, multi-line with one newline\n\n",
},
{
msg: "fourth commit, multi-line with two newlines\n\nanother line!",
msgHead: "fourth commit, multi-line with two newlines\n\n",
},
},
},
}
for _, test := range testCases {
t.Run(test.descr, func(t *testing.T) {
h := newHarness(t)
for _, step := range test.steps {
h.stage(step.tree)
// Commit as the first configured account, then immediately verify.
account := h.cfg.Accounts[0]
masterCommit, hash, err := h.repo.CommitMaster(step.msg, account.ID, h.sig)
if err != nil {
t.Fatalf("failed to make MasterCommit: %v", err)
} else if err := h.repo.VerifyMasterCommit(hash); err != nil {
t.Fatalf("could not verify hash %v: %v", hash, err)
}
// Default expectation: the message head is the trimmed message
// followed by a blank line separating it from the YAML body.
commit, err := h.repo.GitRepo.CommitObject(hash)
if err != nil {
t.Fatalf("failed to retrieve commit %v: %v", hash, err)
} else if step.msgHead == "" {
step.msgHead = strings.TrimSpace(step.msg) + "\n\n"
}
if !strings.HasPrefix(commit.Message, step.msgHead) {
t.Fatalf("commit message %q does not start with expected head %q", commit.Message, step.msgHead)
}
// Round-trip: decoding the commit message must reproduce the exact
// MasterCommit that CommitMaster returned.
var actualMasterCommit MasterCommit
if err := actualMasterCommit.UnmarshalText([]byte(commit.Message)); err != nil {
t.Fatalf("error unmarshaling commit body: %v", err)
} else if !reflect.DeepEqual(actualMasterCommit, masterCommit) {
t.Fatalf("returned master commit:\n%s\ndoes not match actual one:\n%s",
spew.Sdump(masterCommit), spew.Sdump(actualMasterCommit))
}
}
})
}
}
// TestConfigChange checks that changing the project configuration (adding an
// account and loosening the access controls) is itself governed by the
// existing access controls, and that the newly added account only becomes
// usable once the change has been committed by an already-authorized user.
func TestConfigChange(t *testing.T) {
	h := newHarness(t)

	var toVerify []plumbing.Hash

	// The harness starts with the config and public key already staged;
	// commit those first.
	_, initialHash, err := h.repo.CommitMaster("commit configuration", h.cfg.Accounts[0].ID, h.sig)
	if err != nil {
		t.Fatal(err)
	}
	toVerify = append(toVerify, initialHash)

	// Build a second account and splice it into the config. That account must
	// not be able to commit the config change on its own behalf.
	newSig, newPubKeyBody := sigcred.SignifierPGPTmp(h.rand)
	h.cfg.Accounts = append(h.cfg.Accounts, Account{
		ID: "toot",
		Signifiers: []sigcred.Signifier{{PGPPublicKey: &sigcred.SignifierPGP{
			Body: string(newPubKeyBody),
		}}},
	})
	h.cfg.AccessControls[0].Condition.Signature.AccountIDs = []string{"root", "toot"}
	h.cfg.AccessControls[0].Condition.Signature.Count = "1"

	cfgBody, err := yaml.Marshal(h.cfg)
	if err != nil {
		t.Fatal(err)
	}
	h.stage(map[string]string{ConfigPath: string(cfgBody)})

	_, _, err = h.repo.CommitMaster("add toot user", h.cfg.Accounts[1].ID, newSig)
	if aclErr := (accessctl.ErrConditionSignatureUnsatisfied{}); !errors.As(err, &aclErr) {
		t.Fatalf("CommitMaster should have returned an ErrConditionSignatureUnsatisfied, but returned %v", err)
	}

	// The root user, on the other hand, is authorized to make the change.
	_, rootHash, err := h.repo.CommitMaster("add toot user", h.cfg.Accounts[0].ID, h.sig)
	if err != nil {
		t.Fatalf("got an unexpected error committing with root: %v", err)
	}
	toVerify = append(toVerify, rootHash)

	// With the change committed, toot should be able to commit too.
	h.stage(map[string]string{"foo/bar": "what a cool file"})
	_, tootHash, err := h.repo.CommitMaster("add a cool file", h.cfg.Accounts[1].ID, newSig)
	if err != nil {
		t.Fatalf("got an unexpected error committing with toot: %v", err)
	}
	toVerify = append(toVerify, tootHash)

	// Every successful commit made above must pass verification.
	for i, hash := range toVerify {
		if err := h.repo.VerifyMasterCommit(hash); err != nil {
			t.Fatalf("commit %d (%v) should have been verified but wasn't: %v", i, hash, err)
		}
	}
}

@ -1,20 +1,21 @@
package dehub
import (
"dehub/accessctl"
"dehub/fs"
"dehub/sigcred"
"errors"
"fmt"
"dehub.dev/src/dehub.git/accessctl"
"dehub.dev/src/dehub.git/fs"
"dehub.dev/src/dehub.git/sigcred"
yaml "gopkg.in/yaml.v2"
)
// Account represents a single account defined in the Config.
type Account struct {
ID string `yaml:"id"`
Signifiers []sigcred.Signifier `yaml:"signifiers"`
Meta map[string]string `yaml:"meta,omitempty"`
ID string `yaml:"id"`
Signifiers []sigcred.SignifierUnion `yaml:"signifiers"`
Meta map[string]string `yaml:"meta,omitempty"`
}
// Config represents the structure of the main dehub configuration file, and is
@ -24,7 +25,7 @@ type Config struct {
AccessControls []accessctl.AccessControl `yaml:"access_controls"`
}
func (r *Repo) loadConfig(fs fs.FS) (Config, error) {
func (proj *Project) loadConfig(fs fs.FS) (Config, error) {
rc, err := fs.Open(ConfigPath)
if err != nil {
return Config{}, fmt.Errorf("could not open config.yml: %w", err)
@ -36,23 +37,34 @@ func (r *Repo) loadConfig(fs fs.FS) (Config, error) {
return cfg, fmt.Errorf("could not decode config.yml: %w", err)
}
// older config versions also had access_controls be an array, but not using
// the action field. So filter out array elements without the action field.
acl := cfg.AccessControls
cfg.AccessControls = cfg.AccessControls[:0]
for _, ac := range acl {
if ac.Action == "" {
continue
}
cfg.AccessControls = append(cfg.AccessControls, ac)
}
// TODO validate Config
return cfg, nil
}
// LoadConfig loads the Config object from the HEAD of the repo, or directly
// from the filesystem if there is no HEAD yet.
func (r *Repo) LoadConfig() (Config, error) {
headFS, err := r.headOrRawFS()
// LoadConfig loads the Config object from the HEAD of the project's git repo,
// or directly from the filesystem if there is no HEAD yet.
func (proj *Project) LoadConfig() (Config, error) {
headFS, err := proj.headFS()
if err != nil {
return Config{}, fmt.Errorf("error retrieving repo HEAD: %w", err)
}
return r.loadConfig(headFS)
return proj.loadConfig(headFS)
}
func (r *Repo) signifierForCredential(fs fs.FS, cred sigcred.Credential) (sigcred.SignifierInterface, error) {
cfg, err := r.loadConfig(fs)
func (proj *Project) signifierForCredential(fs fs.FS, cred sigcred.CredentialUnion) (sigcred.Signifier, error) {
cfg, err := proj.loadConfig(fs)
if err != nil {
return nil, fmt.Errorf("error loading config: %w", err)
}
@ -69,13 +81,12 @@ func (r *Repo) signifierForCredential(fs fs.FS, cred sigcred.Credential) (sigcre
return nil, fmt.Errorf("no account object for account id %q present in config", cred.AccountID)
}
for i, sig := range account.Signifiers {
if sigInt, err := sig.Interface(); err != nil {
return nil, fmt.Errorf("error converting signifier index:%d to inteface: %w", i, err)
} else if ok, err := sigInt.Signed(fs, cred); err != nil {
for i, sigUn := range account.Signifiers {
sig := sigUn.Signifier(cred.AccountID)
if ok, err := sig.Signed(fs, cred); err != nil {
return nil, fmt.Errorf("error checking if signfier index:%d signed credential: %w", i, err)
} else if ok {
return sigInt, nil
return sig, nil
}
}

@ -8,34 +8,38 @@ import (
"gopkg.in/src-d/go-git.v4/plumbing/object"
)
type fileChanged struct {
path string
fromMode, toMode filemode.FileMode
fromHash, toHash plumbing.Hash
// ChangedFile describes a single file which has been changed in some way
// between two object.Trees. If the From fields are empty then the file was
// created, if the To fields are empty then the file was deleted.
type ChangedFile struct {
// Path of the file, relative to the repository root.
Path string
// File modes on the "from" and "to" sides of the diff; the zero value means
// the file did not exist on that side.
FromMode, ToMode filemode.FileMode
// Blob hashes on the "from" and "to" sides of the diff; the zero hash means
// the file did not exist on that side.
FromHash, ToHash plumbing.Hash
}
func calcDiff(from, to *object.Tree) ([]fileChanged, error) {
// ChangedFilesBetweenTrees returns the ChangedFile objects which represent the
// difference between the two given trees.
func ChangedFilesBetweenTrees(from, to *object.Tree) ([]ChangedFile, error) {
changes, err := object.DiffTree(from, to)
if err != nil {
return nil, fmt.Errorf("could not calculate tree diff: %w", err)
}
filesChanged := make([]fileChanged, len(changes))
changedFiles := make([]ChangedFile, len(changes))
for i, change := range changes {
if from := change.From; from.Name != "" {
filesChanged[i].path = from.Name
filesChanged[i].fromMode = from.TreeEntry.Mode
filesChanged[i].fromHash = from.TreeEntry.Hash
changedFiles[i].Path = from.Name
changedFiles[i].FromMode = from.TreeEntry.Mode
changedFiles[i].FromHash = from.TreeEntry.Hash
}
if to := change.To; to.Name != "" {
if exPath := filesChanged[i].path; exPath != "" && exPath != to.Name {
panic(fmt.Sprintf("DiffTree entry changed path from %q to %q", exPath, to.Name))
if exPath := changedFiles[i].Path; exPath != "" && exPath != to.Name {
panic(fmt.Sprintf("unexpected changed path from %q to %q", exPath, to.Name))
}
filesChanged[i].path = to.Name
filesChanged[i].toMode = to.TreeEntry.Mode
filesChanged[i].toHash = to.TreeEntry.Hash
changedFiles[i].Path = to.Name
changedFiles[i].ToMode = to.TreeEntry.Mode
changedFiles[i].ToHash = to.TreeEntry.Hash
}
}
return filesChanged, nil
return changedFiles, nil
}

@ -0,0 +1,71 @@
# Roadmap
This document describes currently planned features and events related to the
dehub project. Its intention is to help prioritize work. There are no dates
set, only a sequence of milestones and the requirements to hit them.
## Milestone: IPFS support
* Big ol' question mark on this one.
## Milestone: Versions
* Tag commits
* Add dehub version to payloads, make binary aware of it
* Figure out a release system?
## Milestone: Prime commits
(Cloning/remote management is probably a pre-requisite of this, so it's a good
thing it comes after IPFS support)
* Ability to specify which commit is prime.
* The prime commit is essentially the identifier of the entire project; even
if two project instances share a commit tree, if they are using a
different prime commit then they are not the same project.
## Milestone: Minimal plugin support
* SPEC and implement. Things which should be pluggable, initially:
* Conditions
* Signifiers
* Filters
* Payloads???
## Milestone: Minimal notifications support
* Some way to store notification settings locally, and run a command which shows
a sequence of events since the last time you ran it.
* The command should keep a history of all of its outputs, and allow the
user to see that history (in case they run the command, then clear the
output by accident).
* The user should be able to specifically get notifications on threads
they're a part of, threads by branch name pattern, files by path pattern,
and keywords in commit messages.
# Misc Polish
These tasks aren't necessarily scheduled for any particular milestone, but they
are things that could use doing anyway.
* Config validation. Every interface used by the config should have a
`Validate() error` method, and Config itself should as well.
* Maybe coalesce the `accessctl`, `fs`, and `sigcred` packages back into the
root "dehub" package.
* Polish commands
* New flag system, some kind of interactivity support (e.g. user doesn't
specify required argument, give them a prompt on the CLI to input it
rather than an error). This is partially done, in that a new flag system
has been started. Needs further work.
* Review flags:
* probably make some of them into positional arguments
* add flag shortcuts
* document everything better.
* POSIX compatible-ish flags?
* Possibly save state locally in order to speed things along, such as
"account id" which probably isn't going to change often for a user.

@ -0,0 +1,501 @@
# SPEC
This document describes the dehub protocol.
This document assumes that the reader is familiar with git, both conceptually
and in practical use of the git tool. All references to a git-specific concept
retain their meaning; dehub concepts build upon git concepts, but do not
override them.
## Project {#project}
A dehub project is comprised of:
* A collection of files and directories.
* Meta actions related to those files, e.g. discussion, proposed changes, etc.
* Configuration defining which meta actions are allowed under which
circumstances.
All of these components are housed in a git repository. A dehub project does not
require a central repository location (a "remote"), though it may use one if
desired.
## Commit Payload {#payload}
All commits in a dehub [project](#project) contain a payload. The payload is
encoded into the commit message as a YAML object. Here is the general structure
of a commit message containing a payload:
```
Human readable message head
---
# Three dashes indicate the start of the yaml body.
type: type of the payload # Always required
fingerprint: std-base-64 string # Always required
credentials: [...] # Not required but usually present
type_specific_field_a: valueA
type_specific_field_b: valueB
```
The message head is a human readable description of what is being committed, and
is terminated at the first newline. Everything after the message head must be
valid YAML which encodes the payload.
### Fingerprint {#fingerprint}
Each [payload](#payload) object contains a `fingerprint` field. The fingerprint
is an opaque byte string encoded using standard base-64. The algorithm used to
generate the fingerprint will depend on the payload type, and can be found in
each type's sub-section in this document.
### Credential {#credential}
The `credentials` field is not required, but in practice will be found on almost
every [payload](#payload). The field's value will be an array of credential
objects. Only one credential object is currently supported, `pgp_signature`:
```yaml
type: pgp_signature
# One of these fields is required. If account_id is present, it relates the
# signature to a pgp_public_key signifier defined for that account in the config
# (see the Signifier sub-section). Otherwise, the public key will be included in
# the credential itself as the value of pub_key_body.
account_id: some_user_id # Optional
pub_key_body: inlined ASCII-armored pgp public key
# the ID (pgp fingerprint) of the key used to generate the signature
pub_key_id: XXX
# a signature of the payload's unencoded fingerprint, encoded using standard
# base-64
body: std-base-64 signature
```
### Payload Types {#payload-types}
#### Change Payload {#change-payload}
A change [payload](#payload) encompasses a set of changes to the files in the
project. To construct the change payload one must reference the file tree of the
commit which houses the payload as well as the file tree of its parent commit;
specifically one must take the difference between them.
A change payload looks like this:
```yaml
type: change
fingerprint: std-base-64 string
credentials: [...]
description: |-
The description will generally start with a single line, followed by a long-form body
The description corresponds to the body of a commit message in a "normal"
git repo. It gives a more-or-less long-form explanation of the changes being
made to the project's files.
```
##### Change Payload Fingerprint {#change-payload-fingerprint}
The unencoded [fingerprint](#fingerprint) of a [change payload](#change-payload)
is calculated as follows:
* Concatenate the following:
* A uvarint indicating the number of bytes in the description string.
* The description string.
* A uvarint indicating the number of files changed between this commit and
its parent.
* For each file changed, ordered lexicographically-ascending based on its full
relative path within the git repo:
* A uvarint indicating the length of the full relative path of the file
within the repo, as a string.
* The full relative path of the file within the repo, as a string.
* A little-endian uint32 representing the previous file mode of the file
(or 0 if the file is not present in the parent commit's tree).
* The 20-byte SHA1 hash of the contents of the previous version of the file
(or 20 0 bytes if the file is not present in the parent commit's tree).
* A little-endian uint32 representing the new file mode of the file (or 0
if the file is not present in the current commit's tree).
* The 20-byte SHA1 hash of the contents of the new version of the file (or
20 0 bytes if the file is not present in the current commit's tree).
* Calculate the SHA-256 hash of the concatenation result.
* Prepend a 0 byte to the result of the SHA-256 hash.
This unencoded fingerprint is then standard base-64 encoded, and that is used as
the value of the `fingerprint` field.
#### Comment Payload {#comment-payload}
A comment [payload](#payload) encompasses no file changes, and is used only to
contain a comment made by a single user.
A comment payload looks like this:
```yaml
type: comment
fingerprint: std-base-64 string
credentials: [...]
comment: |-
Hey all, how's it going?
Just wanted to pop by and say howdy.
```
The message head of a comment payload will generally be a truncated form of the
comment itself.
##### Comment Payload Fingerprint {#comment-payload-fingerprint}
The unencoded [fingerprint](#fingerprint) of a [comment
payload](#comment-payload) is calculated as follows:
* Concatenate the following:
* A uvarint indicating the number of bytes in the comment string.
* The comment string.
* Calculate the SHA-256 hash of the concatenation result.
* Prepend a 0 byte to the result of the SHA-256 hash.
This unencoded fingerprint is then standard base-64 encoded, and that is used as
the value of the `fingerprint` field.
#### Credential Payload
A credential [payload](#payload) contains only one or more credentials for an
arbitrary [fingerprint](#fingerprint). Credential payloads can be combined with
other payloads of the same fingerprint to create a new payload with many
credentials.
A credential payload looks like this:
```yaml
type: credential
fingerprint: std-base-64 string
credentials: [...]
# This field is not required, but can be helpful in situations where the
# fingerprint was generated based on multiple change payloads
commits:
- commit hash
- commit hash
- commit hash
# This field is not required, but can be helpful to clarify which description
# was used when generating a change fingerprint.
change_description: blah blah blah
```
## Project Configuration {#project-configuration}
The `.dehub` directory contains all meta information related to the dehub
[project](#project). All files within `.dehub` are tracked by the git repo like
any other files in the project.
### config.yml {#config-yml}
The `.dehub/config.yml` file contains a yaml encoded configuration object:
```yaml
accounts: [...]
access_controls: [...]
```
Both fields are described in their own sub-section below.
#### Account {#account}
An account defines a specific user of a [project](#project). Every account has
an ID; no two accounts within a project may share the same ID.
An account looks like this:
```yaml
id: some_string
signifiers: [...]
```
##### Signifier {#signifier}
A signifier is used to signify that an [account](#account) has taken some
action. The most common use-case is to prove that an account created a
particular [credential](#credential). An account may have more than one
signifier.
Currently there is only one signifier type, `pgp_public_key`:
```yaml
type: pgp_public_key
# Path to ASCII-armored pgp public key, relative to repo root.
path: .dehub/account.asc
```
or
```yaml
type: pgp_public_key
body: inlined ASCII-armored pgp public key
```
#### Access Control {#access-control}
An access control allows or denies a particular commit from becoming a part of
a [project](#project). Each access control has an action (allow or deny) and a
set of filters (filters are described in the next section):
```yaml
action: allow # or deny
filters: [...]
```
When verifying a commit against a project's access controls, each access
control's filters are applied to the commit in the order they appear in the
configuration. The first access control for which all filters match is found,
and its action is taken.
An access control with no filters matches all commits.
##### Filter {#filter}
There are many kinds of [access control](#access-control) filters. Any filter
can be applied to a commit, with no other input, and produce a boolean value.
All filters have a `type` field which indicates their type.
###### Signature Filter {#signature-filter}
A [filter](#filter) of type `signature` asserts that a commit's
[payload](#payload) contains [signature credentials](#credential) with certain
properties. A signature filter must have one of these fields, which define the
set of users or [accounts](#account) whose signatures are applicable.
* `account_ids: [...]` - an array of account IDs, each having been defined in
the accounts section of the [configuration](#config-yml).
* `any_account: true` - matches any account defined in the accounts section of
the configuration.
* `any: true` - matches any signature, whether or not its signifier has been
defined in the configuration.
A `count` field may also be included. Its value may be an absolute number (e.g.
`5`) or it may be a string indicating a percent (e.g. `"50%"`). If not included
it will be assumed to be `1`.
The count indicates how many accounts from the specified set must have a
signature included. If a percent is given then that will be multiplied against
the size of the set (rounded up) to determine the necessary number.
Here are some example signature filters, and explanations for each:
```yaml
# requires that 2 of the 3 specified accounts have a signature credential on
# the commit.
type: signature
account_ids:
- amy
- bill
- colleen
count: 2
```
```yaml
# requires that every account defined in the configuration has a signature
# credential on the commit.
type: signature
any_account: true
count: "100%"
```
```yaml
# requires at least one signature credential, not necessarily from an account.
type: signature
any: true
```
###### Branch Filter {#branch-filter}
A [filter](#filter) of type `branch` matches the commit based on which branch in
the repo it is being or has been committed to. Matching is performed on the
short name of the branch, using globstar pattern matching.
A branch filter can have one or multiple patterns defined. The filter will match
if at least one defined pattern matches the short form of the branch name.
A branch filter with only one pattern can be defined like this:
```yaml
type: branch
pattern: some_branch
```
A branch filter with multiple patterns can be defined like this:
```yaml
type: branch
patterns:
- some_branch
- branch*glob
- amy/**
```
###### Files Changed Filter {#files-changed-filter}
A [filter](#filter) of type `files_changed` matches the commit based on which
files were changed between the tree of the commit's parent and the commit's
tree. Matching is performed on the paths of the changed files, relative to the
repo root.
A files changed filter can have one or multiple patterns defined. The filter
will match if any of the changed files matches at least one defined pattern.
A files changed filter with only one pattern can be defined like this:
```yaml
type: files_changed
pattern: .dehub/*
```
A files changed filter with multiple patterns can be defined like this:
```yaml
type: files_changed
patterns:
- some/dir/*
- foo_files_*
- "**.jpg"
```
###### Payload Type Filter {#payload-type-filter}
A [filter](#filter) of type `payload_type` matches a commit based on the type of
its [payload](#payload). A payload type filter can have one or more types
defined. The filter will match if the commit's payload type matches at least one
of the defined types.
A payload type filter with only one matching type can be defined like this:
```yaml
type: payload_type
payload_type: comment
```
A payload type filter with multiple matching types can be defined like this:
```yaml
type: payload_type
payload_types:
- comment
- change
```
###### Commit Attributes Filter {#commit-attributes-filter}
A [filter](#filter) of type `commit_attributes` matches a commit based on
certain attributes it has. A commit attributes filter may have one or more
fields defined, each corresponding to a different attribute the commit may have.
If more than one field is defined then all corresponding attributes on the
commit must match for the filter to match.
Currently the only possible attribute is `non_fast_forward: true`, which matches
a commit which is not a descendant of the HEAD of the branch it's being pushed
onto. This attribute only makes sense in the context of a pre-receive git hook.
A commit attributes filter looks like this:
```yaml
type: commit_attributes
non_fast_forward: true
```
###### Not Filter {#not-filter}
A [filter](#filter) of type `not` matches a commit using the negation of a
sub-filter, defined within the not filter. If the sub-filter returns true for
the commit, then the not filter returns false, and vice-versa.
A not filter looks like this:
```yaml
type: not
filter:
# a branch filter is used as the sub-filter in this example
type: branch
pattern: main
```
##### Default Access Controls {#default-access-controls}
These [access controls](#access-control) will be implicitly appended to the list
defined in the [configuration](#config-yml):
```yaml
# Any account may add any commit to any non-main branch, provided there is at
# least one signature credential. This includes non-fast-forwards.
- action: allow
filters:
- type: not
filter:
type: branch
pattern: main
- type: signature
any_account: true
count: 1
# Non-fast-forwards are denied in all other cases. In effect, one cannot
# force-push onto the main branch.
- action: deny
filters:
- type: commit_attributes
non_fast_forward: true
# Any account may add any change commit to the main branch, provided there is
# at least one signature credential.
- action: allow
filters:
- type: branch
pattern: main
- type: payload_type
payload_type: change
- type: signature
any_account: true
count: 1
# All other actions are denied.
- action: deny
```
These default access controls provide a useful baseline of requirements that all
[projects](#project) will (hopefully) find useful in their infancy.
## Commit Verification {#commit-verification}
The dehub protocol is designed such that every commit is "verifiable". A
verifiable commit has the following properties:
* Its [fingerprint](#fingerprint) is correctly formed.
* All of its [credentials](#credential) are correctly formed.
* If they are signatures, they are valid signatures of the commit's
unencoded fingerprint.
* The project's [access controls](#access-control) allow the commit.
The [project's configuration](#config-yml) is referenced frequently when
verifying a commit, such as when determining which access controls to apply and
discovering [signifiers](#signifier) of [accounts](#account). In all cases the
configuration as defined in the commit's _parent_ is used when verifying that
commit. The exception is the [prime commit](#prime-commit), which uses its own
configuration.
### Prime Commit {#prime-commit}
The prime commit is the trusted seed of the [project](#project). When a user
clones and verifies a dehub project they must, implicitly or explicitly, trust
the contents of the prime commit. All other commits must be descendants of the
prime commit.
Manually specifying a prime commit is not currently spec'd, but it will be.
By default the prime commit is the root commit of the `main` branch.

@ -0,0 +1,128 @@
# Tutorial 0: Say Hello!
This tutorial will guide you through cloning a dehub project locally, creating a
comment, and pushing that comment back up to the remote. The project in
question: dehub itself!
This tutorial assumes you have [dehub installed](/index.html#getting-started),
you have git and gpg installed, and you have a gpg key already created.
## Step 0: Clone the Project
Cloning the dehub project is as simple as cloning its git repo:
```
git clone https://dehub.dev/src/dehub.git
cd dehub
```
Once cloned, feel free to look around the project. You should initially find
yourself on the `main` branch, the primary branch of most dehub projects
(analogous to the `master` branch of most git repos).
Calling `git log` will show the commit messages for all commits in the branch.
You will notice the commit messages aren't formatted in the familiar way, for
example:
```
commit 351048e9aabef7dc0f99b00f02547e409859a33f
Author: mediocregopher <>
Date: Sat Apr 25 15:17:21 2020 -0600
Completely rewrite SPEC
---
type: change
description: |-
Completely rewrite SPEC
It's good this time, and complete. After this rewrite it will be necessary to
update a lot of the code, since quite a few things got renamed.
fingerprint: AG0s3yILU+0uIZltVY7A9/cgxr/pXk2MzGwExsY/hbIc
credentials:
- type: pgp_signature
pub_key_id: 95C46FA6A41148AC
body: BIG LONG STRING
account: mediocregopher
```
Instead of just being a human-readable description they are YAML encoded payload
objects. We will dive into these payload objects more throughout this tutorial
series.
## Step 1: Checkout the Welcome Branch
Next you're going to checkout the public welcome branch. This is done through a
normal git checkout command:
```
git checkout public/welcome
```
You can do `git log` to see all the comments people have been leaving in this
branch. The `public/welcome` branch is differentiated from the `main` branch in
two ways:
* It has been configured to allow comment commits from anonymous users to be
pushed to it. Project configuration is covered in a future tutorial.
* It has no code files tracked, its only purpose is for comments.
## Step 2: Create Your Comment
Now that you've poked around the welcome branch a bit, it's time to leave a
comment of your own! This is as easy as doing:
```
dehub commit --anon-pgp-key=KEY_NAME comment
```
(`KEY_NAME` should be replaced with any selector which will match your pgp key,
such as the key ID, the name on the key, or the email.)
Your default text editor (defined by the EDITOR environment variable) will pop
up and you can then write down your comment. When you save and close your editor
dehub will sign the comment with your pgp key and create a commit with it.
If you're having trouble thinking of something to say, here's some prompts to
get you going:
* Introduce yourself; say where you're from and what your interests are.
* How did you find dehub? Why is it interesting to you?
* If you're using dehub for a project, shill your project!
* If you'd like to get involved in dehub's development, let us know what your
skills are and how you can help. Remember, it takes more than expert
programmers to make a project successful.
Once you've created your commit you can call `git log` to verify that it's been
created to your liking. If there's anything about the comment you'd like to
change you can amend the commit like so:
```
dehub commit --anon-pgp-key=KEY_NAME comment --amend
```
## Step 3: Push Your Commit
As of now your comment commit only exists on your local machine. For everyone
else to see it you'll need to push it to the dehub server, exactly like with a
normal git commit. Pushing is done in the same way as a normal git commit as
well: `git push`.
If you receive an error that's like `Updates were rejected because the tip of
your current branch is behind` then someone else has pushed to the branch in
between the last time you pulled and now. Do a `git pull --rebase` to pull in
those new changes, and try pushing again.
## Step 4: Follow the Conversation
In order to see other people's responses to your comment, and all other parts of
the conversation, all you need to do is call `git pull` with the
`public/welcome` branch checked out.
You now have all the tools needed to participate in a dehub discussion thread!
Continue on to [Tutorial 1](tut1.html) to set up your own dehub project and
learn about credentials and their verification.

@ -0,0 +1,178 @@
# Tutorial 1: Create Your Own Project
This tutorial will guide you through starting a dehub project of your own, as
well as introducing some basic concepts regarding how commit payloads work. You
will use an example hello world project to do this.
This tutorial assumes you have already completed [Tutorial 0](tut0.html).
## Step 0: Init the Project
A dehub project is initialized in the same way as a git project. An empty
directory is created, and `dehub init` is run within that directory.
```
mkdir hello-world
cd hello-world
dehub init
```
`dehub init` does nearly exactly the same thing as `git init`, with the primary
difference being that it sets the initial branch to be `main` instead of
`master`. dehub makes a distinction between `main` and `master` in order to help
prevent confusion between dehub and vanilla git projects, as well as to avoid
conflicts when migrating vanilla git projects to dehub.
## Step 1: Add the First Account
A dehub project is not fully initialized until it has an account defined for it.
dehub accounts refer to a specific user who has some kind of access to the
project. Each account can have specific permissions for it, as well as multiple
ways of signifying itself.
For now, you'll add a basic account `tut` with a pgp key signifier. First,
create the `.dehub` directory, which is where all dehub project configuration
goes, and put your pgp key there:
```
mkdir .dehub
gpg -a --export KEY_ID > .dehub/tut.asc
```
Next you'll create the `.dehub/config.yml` file, which is where accounts are
actually defined (amongst many other things). The file should have the following
contents:
```yaml
# contents of .dehub/config.yml
---
accounts:
- id: tut
signifiers:
- type: pgp_public_key_file
path: ".dehub/tut.asc"
```
Finally, you'll commit these changes and the project will have its first commit!
Committing changes works very similarly to committing comments (as you did in
[Tutorial 0](tut0.html)). Where a comment commit merely carries a user's
comment, a change commit describes a set of changes to the tracked files in the
git repo.
```
git add --all
dehub commit --as tut change
```
Like when you made a comment commit, this will pop up with your editor asking
for a description of the changes. Fill it in with something like `Initialize the
project` and save+close the editor. Depending on your pgp key settings you'll
likely be prompted for your pgp key password at this point. After that the
commit has been created!
## Step 2: Inspect the Payload
In this step you're going to look at the commit you just created and learn about
the contents of the payload. To view the commit do `git show`. Something similar
to the following should be output as the commit message:
```
commit 3cdcbc19546d4e6d817ebfba3e18afbc23283ec0
Author: username <>
Date: Sat Apr 25 15:17:21 2020 -0600
Initialize the project
---
type: change
description: Initialize the project
fingerprint: AG0s3yILU+0uIZltVY7A9/cgxr/pXk2MzGwExsY/hbIc
credentials:
- type: pgp_signature
pub_key_id: 95C46FA6A41148AC
body: BIG LONG STRING
account: tut
```
All commits in a dehub project will contain a similar looking message. The first
line (the head) is always a human readable description of the commit. In this
case our commit description itself, `Initialize the project`, was used.
After the head comes the payload, which is always a YAML encoded object. All
payloads have a `type` field indicating what type of payload they are. That type
will determine what other fields the payload is expected to have. The other
fields in this payload object are:
* `description`: This is the description which was input into the editor when
creating the change commit.
* `fingerprint`: A unique descriptor for this set of changes. It is computed
using both `description` and the files changed.
* `credentials`: A set of credentials for this commit, each one declaring
that this commit has been given approval by a user. This commit has one
`pgp_signature` credential, created by the `tut` account. The `body` is a
signature of the `fingerprint` created by `tut`'s pgp key.
## Step 3: Create Another Commit
Now that the initial commit is created, and configuration has been added to the
dehub project, you can continue on to use the project for what it was intended
for: greeting the world!
Add a simple "hello world" script to the project by doing:
```
echo 'echo "hello world"' > hello.sh
git add hello.sh
dehub commit --as tut change --descr 'add hello.sh'
```
You'll notice that this time around you used the `--descr` flag to declare the
change's description, rather than opening up the editor.
Once again you can inspect the payload you just created using `git show`, if
you'd like, or continue on to the next step to learn about commit verification.
## Step 4: Verify Your Commits
All this work to create YAML encoded payloads has been done for one primary
purpose: to make commits verifiable. A verifiable commit is one which follows
the access controls defined by its parent.
Your dehub project doesn't have any explicitly defined access controls (that
will be covered in a future tutorial), and so the defaults are used. By default,
dehub requires that all commits in `main` are change commits which have been
signed by at least one account.
In order to verify the HEAD commit you can do:
```
dehub verify
```
This command looks at the project configuration defined in the parent of HEAD
and verifies that HEAD conforms to it. The HEAD of your project is a change
commit signed by the account `tut`, and so should be verifiable.
Arbitrary commits can be verified using the `--rev` flag. This command will
verify the parent of HEAD, i.e. the initial commit:
```
dehub verify --rev HEAD^
```
The initial commit doesn't have a parent, and so is a special case for
verification. The initial commit uses the configuration defined within itself in
order to verify itself. This creates an exploit opportunity: if you clone a
remote dehub project and an attacker intercepts that request they will be able
to send you back a project with a different initial commit than what you
expected. The whole project will still be verifiable, even though it's been
compromised. For this reason it's important to manually verify that the initial
commit of projects you clone are configured correctly, using the expected
signifiers for the expected accounts.
You are now able to initialize a project, configure accounts within it, commit
changes to its files, and verify those commits. Well done! Continue on to
[Tutorial 2](tut2.html), where you will learn how to configure dehub's access
controls.

@ -0,0 +1,262 @@
# Tutorial 2: Access Controls
Access controls, in the context of a dehub project, refer to configuration
defining who is allowed to do what. These controls are defined within the dehub
project itself, within the `.dehub/config.yml` file. This tutorial will guide
you through the basics of how access controls work, how to define them, and some
examples of what can be done with them.
This tutorial assumes you have already completed [Tutorial 1](tut1.html), and
builds on top of the project which was started there.
## Step 0: Create a Restricted Account
Inside the project you started in [Tutorial 1](tut1.html) you're going to add
another account to the project, called `tot`. Initially, `tot` will have all the
same permissions as `tut`, except being allowed to modify the project
configuration.
First, export your gpg key into the project for `tot` to use, the same key used
for `tut`:
```
gpg -a --export KEY_ID > .dehub/tot.asc
```
(For the purposes of a tutorial it's fine for two accounts to share a
key, but it's not something which generally makes sense to do.)
Now, modify the `.dehub/config.yml` to have the following contents:
```yaml
# contents of .dehub/config.yml
---
accounts:
- id: tut
signifiers:
- type: pgp_public_key_file
path: ".dehub/tut.asc"
- id: tot
signifiers:
- type: pgp_public_key_file
path: ".dehub/tot.asc"
access_controls:
- action: allow
filters:
- type: signature
account_ids:
- tut
- type: files_changed
pattern: .dehub/*
- action: deny
filters:
- type: files_changed
pattern: .dehub/*
```
The `accounts` section has been modified to add the `tot` account, but the
primary change here has been to add the `access_controls` section. The next
sub-sections will explain what exactly is being done here, but for now go ahead
and commit these changes:
```
git add --all
dehub commit --as tut change --descr 'add new restricted tot account'
```
### Access Controls
Each access control is an action+filters pair. For any commit being verified,
the access controls defined in its parent commit are iterated through, in order,
until one is found whose filters all match the commit being verified. The action
for that access control, either `allow` or `deny`, is then taken.
If no access controls are defined, or none match, then the default access
controls are used. These are explicitly defined in the
[SPEC](SPEC.html#default-access-controls), but the general effect of them is to
require that all commits have one signature from any of the project's accounts.
### Access Control Filters
There are many different filter types, so only the ones used in the tutorial
will be explained. An exhaustive listing can be found in the
[SPEC](SPEC.html#filter).
The `signature` filter matches commits which have a signature credential created
by any one of the specified accounts. The `files_changed` filter matches commits
which have changed files whose paths match the specified patterns (relative to
the project's root).
### Putting it Together
The first of the new access controls you've defined is:
```
- action: allow
filters:
- type: signature
account_ids:
- tut
- type: files_changed
pattern: .dehub/*
```
This allows any commits which have been signed by `tut` and which modify any of
the files in `.dehub/*`. The second access control is:
```
- action: deny
filters:
- type: files_changed
pattern: .dehub/*
```
This denies any commits which modify any of the files in `.dehub/*`. If a commit
does not match the first access control, but does match this second access
control, it can be assumed that the commit does _not_ have a signature from
`tut` (because that's the only difference between them). Therefore, the effect
of these two controls put together is to only allow `tut` to make changes to the
`.dehub` directory's files.
## Step 1: Test the Restrictions
Let's say that your new user `tot` is having a bit of rebellious phase, and
wants to kick `tut` out of the project. Change `.dehub/config.yml` to have the
following contents (note that `accounts` has been left the same and so is mostly
elided):
```
# abbreviated contents of .dehub/config.yml
---
accounts:
...
access_controls:
- action: deny
filters:
- type: signature
account_ids:
- tut
```
So edgy. Make the commit for `tot`, being sure that the value for the `--as`
flag indicates you're committing _as_ `tot`:
```
git add --all
dehub commit --as tot change --descr 'tut is a butt'
```
Somewhat unexpectedly, the commit has been created! You can see it by doing `git
show`. This shouldn't be possible though, because the previous commit disallowed
anyone but `tut` from changing files within the `.dehub/` directory. Is dehub
broken?
The fact is that, regardless of whether or not the `dehub` tool allows one to
create this commit, `tot` can create this commit. The important thing is that
`tut` is able to notice that it's been created and do something about it. In a
real-world situation, both `tot` and `tut` would be using different computers,
and when `tut` (or anyone else) receives the commit from `tot` they will try to
verify it, fail to do so, and ignore it.
If you perform `dehub verify` you will be greeted with the following error:
```
exiting: blah blah blah: commit matched and denied by this access control:
action: deny
filters:
- type: files_changed
pattern: .dehub/*
```
Because the parent of this commit's config disallows this commit (via the given
access control) it is not verifiable. Go ahead and delete the commit by doing:
```
git reset --hard "$(git rev-list HEAD | tail -3 | head -n1)"
```
## Step 2: Different Restrictions
In light of `tot`'s recent actions it might be prudent to pull back their
permissions a bit. Go ahead and change the `.dehub/config.yml` to:
```
# abbreviated contents of .dehub/config.yml
---
accounts:
...
access_controls:
- action: allow
filters:
- type: signature
account_ids:
- tot
- type: branch
pattern: tot/*
- action: deny
filters:
- type: signature
account_ids:
- tot
```
and commit the change:
```
git add --all
dehub commit --as tut change --descr 'restrict tot to non-main branches'
```
After this, `tot` will still be able to interact with the project, but only
within branches whose names have the prefix `tot/`; the `main` branch remains
open to other accounts, such as `tut`, due to the default access controls.
### Check the New Restrictions
`tot` has decided to do something constructive and wants to make a shell script
which wraps the `echo` command. So helpful. Make a new branch for `tot` to use,
and create a commit on it:
```
git checkout -b tot/echo-script
echo 'echo "$@"' > echo.sh
git add echo.sh
dehub commit --as tot change --descr "added echo.sh script"
```
Check that the commit verifies (it should, since it's on a branch with the
prefix `tot/`):
```
dehub verify
```
Now, as a final sanity check, you'll cherry-pick the commit onto `main` and
ensure that it does _not_ verify there.
```
git checkout main
git cherry-pick tot/echo-script
```
Running `dehub verify` now should fail, even though the commit remains the same.
The only difference is the branch name; the commit is allowed in branches with
the prefix `tot/`, and disallowed otherwise.
Finally, reverse that cherry-pick to make `main` verifiable again:
```
git reset --hard "$(git rev-list HEAD | tail -4 | head -n1)"
```
You now have an understanding of how dehub's access controls work. Access
controls are extremely flexible and can be formulated to fit a wide-variety of
use-cases. In [Tutorial 3](tut3.html) we'll see how access controls can be
formulated to allow for commit sign-offs, where multiple accounts must accredit
a commit before it can be verified, and how such a commit can be created.

@ -0,0 +1,246 @@
# Tutorial 3: Commit Sign-Off
Commit sign-off is a common pattern in vanilla git projects, where a commit must
be approved by one or more people (besides the commit author themselves) in
order to be allowed into the primary branch.
dehub is able to accomplish this same pattern using only the access controls
which have already been covered in this tutorial series and a command which has
not: `dehub combine`. This tutorial will guide you through using `dehub combine`
to facilitate commit sign-off.
This tutorial assumes you have already completed [Tutorial 2](tut2.html), and
builds on top of the project which was started there.
## Step 0: Loosen the Previous Restrictions
In the [previous tutorial](tut2.html) you took an existing project, added a new
user `tot` to it, and then restricted `tot` to only be allowed to make commits
in a certain subset of branches which excluded the `main` branch.
As seen in that tutorial, `tot` is not able to create commits for the `main`
branch _at all_. In this tutorial we're going to open `main` back up to `tot`,
but only with a very important caveat: `tot`'s commits must be approved by
someone else.
In the `hello-world` project which was used for previous tutorials, with the
`main` branch checked out, go ahead and modify `.dehub/config.yml` to have the
following contents:
```
# contents of .dehub/config.yml
---
accounts:
- id: tut
signifiers:
- type: pgp_public_key_file
path: ".dehub/tut.asc"
- id: tot
signifiers:
- type: pgp_public_key_file
path: ".dehub/tot.asc"
access_controls:
- action: allow
filters:
- type: signature
account_ids:
- tot
- type: branch
pattern: tot/*
- action: deny
filters:
- type: branch
pattern: main
- type: not
filter:
type: signature
any_account: true
count: 2
```
and commit the changes:
```
git add .dehub/config.yml
dehub commit --as tut change --descr 'require commit sign-offs in main'
```
The primary change was to replace the old access control denying `tot` the
ability to commit to anything (outside of `tot/*` branches) with this one:
```
- action: deny
filters:
- type: branch
pattern: main
- type: not
filter:
type: signature
any_account: true
count: 2
```
There are two new things here. The first is the new fields on the `signature`
filter: `any_account` replaces the `account_ids` field, and refers to any
account which is defined in the `accounts` section; `count` declares how many
accounts must have a signature on the commit for the filter to match (if not
specified it defaults to 1).
The second new thing is the `not` filter: `not` wraps any other filter, and
reverses whether or not it matches. In this case, it's wrapping our `signature`
filter, such that this access control will match only if the commit _does not_
have signature credentials from 2 different accounts.
The total effect of this access control is to deny any commits to `main` which
have not been signed-off by 2 different accounts.
## Step 1: Some Changes to Merge
In the previous tutorial `tot` created a new script, `echo.sh`, in a new branch
called `tot/echo-script`. Check that branch out, rebase it on `main` (this will
help in later steps), and add another script to it:
```
git checkout tot/echo-script
git rebase main
echo 'echo "$@" | awk "{ print toupper(\$0) }"' > echo-upper.sh
git add echo-upper.sh
dehub commit --as tot change --descr 'echo-upper.sh'
```
Now the `tot/echo-script` branch contains two commits which aren't on `main`,
both of them signed by `tot`. What will happen next is that the branch's commits
will be combined into a single commit, be given accreditation by both `tut` and
`tot`, and added to the `main` branch.
## Step 2: Accreditation
First, `tot` will accredit both commits, and unify the two descriptions in the
process. To do this, you will create your first `credential` commit:
```
dehub commit --as tot credential --start HEAD^^ --descr 'add echo.sh and echo-upper.sh'
```
A `credential` commit, at its core, contains nothing except credentials for any
arbitrary fingerprint. To view the credential commit you just made
do: `git show`. You should see a commit message like:
```
Credential of AO3dn4Se61hq6OWy4Lm6m3MxdT2ru6TrIobuHaWJJidt
---
type: credential
commits:
- f085f13fa839ece122476601d970460ac249dc69 # these will be different
- 40a81ffb4f52dc4149570672f7f7fc053f12226a
change_description: add echo.sh and echo-upper.sh
fingerprint: AO3dn4Se61hq6OWy4Lm6m3MxdT2ru6TrIobuHaWJJidt
credentials:
- type: pgp_signature
pub_key_id: XXX
body: BIG LONG STRING
account: tot
```
You'll notice that the credential commit's fingerprint is different than either
of the two commits it accredits. This is because the fingerprint is based on
the _combination_ of the two commits: it covers the total of the file changes
and the description provided by the user. The two commits are enumerated in the
`commits` field of the payload, and the description provided by the user is
stored in the `change_description` field.
The combined commits have now been accredited by `tot`, but not `tut`, and so
they still lack a necessary credential. Have `tut` make a credential now:
```
dehub commit --as tut credential --rev HEAD
```
This form of the `credential` sub-command only accredits a single commit. When a
single commit is accredited and it itself is a credential commit then the new
commit which is created is merely a copy of the specified credential commit with
the caller's own credential appended to the `credentials` list. You can see this
with `git show`, which should look like:
```
Credential of AO3dn4Se61hq6OWy4Lm6m3MxdT2ru6TrIobuHaWJJidt
---
type: credential
commits:
- f085f13fa839ece122476601d970460ac249dc69 # these will be different
- 40a81ffb4f52dc4149570672f7f7fc053f12226a
change_description: add echo.sh and echo-upper.sh
fingerprint: AO3dn4Se61hq6OWy4Lm6m3MxdT2ru6TrIobuHaWJJidt
credentials:
- type: pgp_signature
pub_key_id: XXX
body: BIG LONG STRING
account: tot
- type: pgp_signature
pub_key_id: XXX
body: BIG LONG STRING
account: tut
```
There are now enough credentials to combine the commits in the `tot/echo-script`
branch into a single commit on the `main` branch.
## Step 3: Combination
At this point the `tot/echo-script` branch has the following elements in place:
* Two change commits, which we want to combine and bring over to `main`.
* A credential commit made by `tot` for the combined changes.
* A credential commit made by `tut` for the combined changes, which includes
`tot`'s credentials.
Combining the commits and placing them on `main` is done with a single command:
```
dehub combine --start HEAD^^^^ --end HEAD --onto main
```
This `combine` command combines all changes made within the given commit range,
the last change description found in that range (in this case it will be from
`tut`'s credential commit), and all credentials for that set of changes. The
command combines them into a single commit which it places on the `main` branch.
You can see the commit you've just created by doing:
```
git checkout main
git show
```
The commit should contain both of the new files, and the message should look
something like:
```
add echo.sh and echo-upper.sh
---
type: change
description: add echo.sh and echo-upper.sh
fingerprint: ALOcEuKJkgIdz27z0fjF1NEbK6Y9cEh2RH4/sL3uf3oa
credentials:
- type: pgp_signature
pub_key_id: XXX
body: BIG LONG BODY
account: tot
- type: pgp_signature
pub_key_id: XXX
body: BIG LONG BODY
account: tut
```
The commit is accredited by two different accounts, and so is allowed to be on
the `main` branch. This can be verified by doing `dehub verify`.
You now are able to require commit sign-off and create signed-off commits! The
access control settings surrounding commit sign-offs are entirely up to you and
your project's needs. You can require sign-off from specific accounts, any
accounts, only on specific files, only in certain branches, etc... all using the
same basic access control building blocks.

@ -0,0 +1,83 @@
package dehub
import (
"crypto/sha256"
"encoding/binary"
"fmt"
"hash"
"sort"
)
var (
	// defaultHashHelperAlgo is the hash constructor used whenever a caller
	// passes a nil hash.Hash to the fingerprint functions in this file.
	defaultHashHelperAlgo = sha256.New
)
// hashHelper wraps a hash.Hash with helpers for writing length-prefixed
// values into it. varintBuf is a reusable scratch buffer for varint encoding
// so repeated writes don't allocate.
type hashHelper struct {
	hash      hash.Hash
	varintBuf []byte
}
// newHashHelper returns a hashHelper wrapping h. If h is nil then a new
// instance of defaultHashHelperAlgo is used instead.
func newHashHelper(h hash.Hash) *hashHelper {
	if h == nil {
		h = defaultHashHelperAlgo()
	}
	return &hashHelper{
		hash:      h,
		varintBuf: make([]byte, binary.MaxVarintLen64),
	}
}
// sum returns the current digest appended to a copy of prefix; the prefix
// itself is not modified.
func (s *hashHelper) sum(prefix []byte) []byte {
	out := append(make([]byte, 0, len(prefix)+s.hash.Size()), prefix...)
	return s.hash.Sum(out)
}
// writeUint writes i into the hash as an unsigned varint. hash.Hash.Write is
// documented to never return an error, so a non-nil error is a programmer bug
// and panics.
func (s *hashHelper) writeUint(i uint64) {
	b := s.varintBuf[:binary.PutUvarint(s.varintBuf, i)]
	if _, err := s.hash.Write(b); err != nil {
		panic(fmt.Sprintf("error writing %x to %T: %v", b, s.hash, err))
	}
}
// writeStr writes str into the hash, prefixed with its byte length as a
// varint.
func (s *hashHelper) writeStr(str string) {
	s.writeUint(uint64(len(str)))
	// check the Write error for consistency with writeUint; hash.Hash.Write
	// should never fail, so a failure is a programmer bug worth a panic.
	if _, err := s.hash.Write([]byte(str)); err != nil {
		panic(fmt.Sprintf("error writing %q to %T: %v", str, s.hash, err))
	}
}
// writeChangedFiles writes a deterministic encoding of changedFiles into the
// hash: the file count, then for each file its path (length-prefixed) and its
// from/to mode and hash.
//
// NOTE: changedFiles is sorted by path in place, so the caller's slice is
// mutated; sorting makes the result independent of input order.
func (s *hashHelper) writeChangedFiles(changedFiles []ChangedFile) {
	sort.Slice(changedFiles, func(i, j int) bool {
		return changedFiles[i].Path < changedFiles[j].Path
	})
	s.writeUint(uint64(len(changedFiles)))
	for _, fileChanged := range changedFiles {
		s.writeStr(fileChanged.Path)
		s.hash.Write(fileChanged.FromMode.Bytes())
		s.hash.Write(fileChanged.FromHash[:])
		s.hash.Write(fileChanged.ToMode.Bytes())
		s.hash.Write(fileChanged.ToHash[:])
	}
}
var (
	// Version bytes prefixed onto the final sums (see hashHelper.sum), so the
	// fingerprint formats can evolve without ambiguity.
	changeHashVersion  = []byte{0}
	commentHashVersion = []byte{0}
)
// genChangeFingerprint computes the fingerprint of a change: the change
// description followed by the set of file changes. If h is nil then
// defaultHashHelperAlgo will be used.
//
// NOTE: changedFiles is sorted in place (see writeChangedFiles).
func genChangeFingerprint(h hash.Hash, msg string, changedFiles []ChangedFile) []byte {
	s := newHashHelper(h)
	s.writeStr(msg)
	s.writeChangedFiles(changedFiles)
	return s.sum(changeHashVersion)
}
// genCommentFingerprint computes the fingerprint of a comment's text. If h is
// nil then defaultHashHelperAlgo will be used.
func genCommentFingerprint(h hash.Hash, comment string) []byte {
	helper := newHashHelper(h)
	helper.writeStr(comment)
	return helper.sum(commentHashVersion)
}

@ -0,0 +1,237 @@
package dehub
import (
"bytes"
"encoding/binary"
"hash"
"testing"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
)
// testHash is a fake hash.Hash which simply records every byte written to it,
// so tests can assert exactly what the fingerprint functions fed in.
type testHash struct {
	bytes.Buffer
}

var _ hash.Hash = new(testHash)

// Sum appends the recorded bytes to b, per the hash.Hash contract.
func (th *testHash) Sum(b []byte) []byte {
	return append(b, th.Buffer.Bytes()...)
}

// Size returns the current number of recorded bytes.
func (th *testHash) Size() int {
	return th.Buffer.Len()
}

// BlockSize is arbitrary; the code under test never depends on it.
func (th *testHash) BlockSize() int {
	return 1
}

// assertContents asserts that the recorded bytes are exactly the
// concatenation of parts, in order, with nothing left over.
func (th *testHash) assertContents(t *testing.T, parts [][]byte) {
	b := th.Buffer.Bytes()
	for _, part := range parts {
		// each part must be a prefix of what remains.
		if len(part) > len(b) || !bytes.Equal(part, b[:len(part)]) {
			t.Fatalf("expected %q but only found %q", part, b)
		}
		b = b[len(part):]
	}
	if len(b) != 0 {
		t.Fatalf("unexpected extra bytes written to testHash: %q", b)
	}
}
// uvarint returns the canonical unsigned-varint encoding of i.
func uvarint(i uint64) []byte {
	var buf [binary.MaxVarintLen64]byte
	return buf[:binary.PutUvarint(buf[:], i)]
}
// TestGenCommentFingerprint asserts the exact byte stream which
// genCommentFingerprint feeds into its hash: the comment's byte length as a
// uvarint, followed by the comment's bytes.
func TestGenCommentFingerprint(t *testing.T) {
	type test struct {
		descr   string
		comment string
		exp     [][]byte
	}

	tests := []test{
		{
			descr:   "empty comment",
			comment: "",
			exp:     [][]byte{uvarint(0)},
		},
		{
			descr:   "normal comment",
			comment: "this is a normal comment",
			exp:     [][]byte{uvarint(24), []byte("this is a normal comment")},
		},
		{
			// the length prefix counts bytes, not runes: "⚡" is 3 bytes.
			descr:   "comment with unicode",
			comment: "sick comment ⚡",
			exp:     [][]byte{uvarint(16), []byte("sick comment ⚡")},
		},
	}

	for _, test := range tests {
		t.Run(test.descr, func(t *testing.T) {
			th := new(testHash)
			genCommentFingerprint(th, test.comment)
			th.assertContents(t, test.exp)
		})
	}
}
// TestGenChangeFingerprint asserts the exact byte stream genChangeFingerprint
// feeds into its hash: the length-prefixed message, the count of changed
// files, then each file's length-prefixed path with its from/to mode and
// hash. It also asserts that files are hashed in path-sorted order no matter
// the input order (see the "unordered" case).
func TestGenChangeFingerprint(t *testing.T) {
	type test struct {
		descr        string
		msg          string
		changedFiles []ChangedFile
		exp          [][]byte
	}

	// hash constructs a plumbing.Hash whose first byte is i (rest zero).
	hash := func(i byte) plumbing.Hash {
		var h plumbing.Hash
		h[0] = i
		return h
	}

	// hashB is like hash but returns the raw byte-slice form.
	hashB := func(i byte) []byte {
		h := hash(i)
		return h[:]
	}

	tests := []test{
		{
			descr:        "empty",
			msg:          "",
			changedFiles: nil,
			exp:          [][]byte{uvarint(0), uvarint(0)},
		},
		{
			descr:        "empty changes",
			msg:          "some msg",
			changedFiles: nil,
			exp:          [][]byte{uvarint(8), []byte("some msg"), uvarint(0)},
		},
		{
			// an added file has the zero ("empty") from-mode/hash.
			descr: "empty msg",
			msg:   "",
			changedFiles: []ChangedFile{{
				Path:   "foo",
				ToMode: filemode.Regular, ToHash: hash(1),
			}},
			exp: [][]byte{uvarint(0), uvarint(1),
				uvarint(3), []byte("foo"),
				filemode.Empty.Bytes(), hashB(0),
				filemode.Regular.Bytes(), hashB(1)},
		},
		{
			descr: "files added",
			msg:   "a",
			changedFiles: []ChangedFile{
				{
					Path:   "foo",
					ToMode: filemode.Regular, ToHash: hash(1),
				},
				{
					Path:   "somedir/bar",
					ToMode: filemode.Executable, ToHash: hash(2),
				},
			},
			exp: [][]byte{uvarint(1), []byte("a"), uvarint(2),
				uvarint(3), []byte("foo"),
				filemode.Empty.Bytes(), hashB(0),
				filemode.Regular.Bytes(), hashB(1),
				uvarint(11), []byte("somedir/bar"),
				filemode.Empty.Bytes(), hashB(0),
				filemode.Executable.Bytes(), hashB(2),
			},
		},
		{
			// same as "files added" but with the inputs reversed; the
			// expected stream is unchanged because files are sorted by path.
			descr: "files added (unordered)",
			msg:   "a",
			changedFiles: []ChangedFile{
				{
					Path:   "somedir/bar",
					ToMode: filemode.Executable, ToHash: hash(2),
				},
				{
					Path:   "foo",
					ToMode: filemode.Regular, ToHash: hash(1),
				},
			},
			exp: [][]byte{uvarint(1), []byte("a"), uvarint(2),
				uvarint(3), []byte("foo"),
				filemode.Empty.Bytes(), hashB(0),
				filemode.Regular.Bytes(), hashB(1),
				uvarint(11), []byte("somedir/bar"),
				filemode.Empty.Bytes(), hashB(0),
				filemode.Executable.Bytes(), hashB(2),
			},
		},
		{
			descr: "file modified",
			msg:   "a",
			changedFiles: []ChangedFile{{
				Path:     "foo",
				FromMode: filemode.Regular, FromHash: hash(1),
				ToMode: filemode.Executable, ToHash: hash(2),
			}},
			exp: [][]byte{uvarint(1), []byte("a"), uvarint(1),
				uvarint(3), []byte("foo"),
				filemode.Regular.Bytes(), hashB(1),
				filemode.Executable.Bytes(), hashB(2),
			},
		},
		{
			// a removed file has the zero ("empty") to-mode/hash.
			descr: "file removed",
			msg:   "a",
			changedFiles: []ChangedFile{{
				Path:     "foo",
				FromMode: filemode.Regular, FromHash: hash(1),
			}},
			exp: [][]byte{uvarint(1), []byte("a"), uvarint(1),
				uvarint(3), []byte("foo"),
				filemode.Regular.Bytes(), hashB(1),
				filemode.Empty.Bytes(), hashB(0),
			},
		},
		{
			descr: "files added, modified, and removed",
			msg:   "aaa",
			changedFiles: []ChangedFile{
				{
					Path:   "foo",
					ToMode: filemode.Regular, ToHash: hash(1),
				},
				{
					Path:     "bar",
					FromMode: filemode.Regular, FromHash: hash(2),
					ToMode: filemode.Regular, ToHash: hash(3),
				},
				{
					Path:     "baz",
					FromMode: filemode.Executable, FromHash: hash(4),
				},
			},
			exp: [][]byte{uvarint(3), []byte("aaa"), uvarint(3),
				uvarint(3), []byte("bar"),
				filemode.Regular.Bytes(), hashB(2),
				filemode.Regular.Bytes(), hashB(3),
				uvarint(3), []byte("baz"),
				filemode.Executable.Bytes(), hashB(4),
				filemode.Empty.Bytes(), hashB(0),
				uvarint(3), []byte("foo"),
				filemode.Empty.Bytes(), hashB(0),
				filemode.Regular.Bytes(), hashB(1),
			},
		},
	}

	for _, test := range tests {
		t.Run(test.descr, func(t *testing.T) {
			th := new(testHash)
			genChangeFingerprint(th, test.msg, test.changedFiles)
			th.assertContents(t, test.exp)
		})
	}
}

@ -1,4 +1,4 @@
module dehub
module dehub.dev/src/dehub.git
go 1.13

@ -0,0 +1,628 @@
package dehub
import (
"bytes"
"errors"
"fmt"
"sort"
"strings"
"time"
"dehub.dev/src/dehub.git/accessctl"
"dehub.dev/src/dehub.git/fs"
"dehub.dev/src/dehub.git/sigcred"
"dehub.dev/src/dehub.git/typeobj"
"dehub.dev/src/dehub.git/yamlutil"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
yaml "gopkg.in/yaml.v2"
)
// Payload describes the methods which must be implemented by the different
// payload types (change, credential, comment; see PayloadUnion). None of the
// methods should modify the underlying object.
type Payload interface {
	// MessageHead returns the head of the commit message (i.e. the first line).
	// The PayloadCommon of the outer PayloadUnion is passed in for added
	// context, if necessary.
	MessageHead(PayloadCommon) string

	// Fingerprint returns the raw fingerprint which can be signed when
	// accrediting this payload. The ChangedFile objects given describe the file
	// changes between the parent commit and this commit.
	//
	// If this method returns nil it means that the payload has no fingerprint
	// in-and-of-itself.
	Fingerprint([]ChangedFile) ([]byte, error)
}
// PayloadCommon describes the fields common to all Payloads.
type PayloadCommon struct {
	// Fingerprint is the raw fingerprint which the Credentials sign.
	Fingerprint yamlutil.Blob `yaml:"fingerprint"`
	// Credentials accredit the Fingerprint.
	Credentials []sigcred.CredentialUnion `yaml:"credentials"`

	// LegacyChangeHash is no longer used, use Fingerprint instead. It is
	// migrated into Fingerprint during YAML unmarshaling.
	LegacyChangeHash yamlutil.Blob `yaml:"change_hash,omitempty"`
}
// credIDs returns a sorted, de-duplicated list of the identifiers found on
// the payload's credentials. A credential's account ID takes priority; its
// anonymous ID is used only when no account ID is set, and credentials with
// neither are skipped.
func (cc PayloadCommon) credIDs() []string {
	seen := make(map[string]struct{}, len(cc.Credentials))
	for _, cred := range cc.Credentials {
		id := cred.AccountID
		if id == "" {
			id = cred.AnonID
		}
		if id != "" {
			seen[id] = struct{}{}
		}
	}
	ids := make([]string, 0, len(seen))
	for id := range seen {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	return ids
}
func abbrevCommitMessage(msg string) string {
i := strings.Index(msg, "\n")
if i > 0 {
msg = msg[:i]
}
if len(msg) > 80 {
msg = msg[:77] + "..."
}
return msg
}
// PayloadUnion represents a single Payload of variable type. Only one field
// should be set on a PayloadUnion, unless otherwise noted.
type PayloadUnion struct {
	Change     *PayloadChange     `type:"change,default"`
	Credential *PayloadCredential `type:"credential"`
	Comment    *PayloadComment    `type:"comment"`

	// Common may be set in addition to one of the other fields.
	Common PayloadCommon `yaml:",inline"`
}

// MarshalYAML implements the yaml.Marshaler interface.
func (p PayloadUnion) MarshalYAML() (interface{}, error) {
	return typeobj.MarshalYAML(p)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface. It also migrates
// the deprecated change_hash field into Fingerprint, so payloads from older
// commits unmarshal into the current form.
func (p *PayloadUnion) UnmarshalYAML(unmarshal func(interface{}) error) error {
	if err := typeobj.UnmarshalYAML(p, unmarshal); err != nil {
		return err
	} else if len(p.Common.LegacyChangeHash) > 0 {
		p.Common.Fingerprint = p.Common.LegacyChangeHash
		p.Common.LegacyChangeHash = nil
	}
	return nil
}

// Payload returns the Payload instance encapsulated by this PayloadUnion.
//
// This will panic if a Payload field is not populated.
func (p PayloadUnion) Payload() Payload {
	el, _, err := typeobj.Element(p)
	if err != nil {
		panic(err)
	}
	return el.(Payload)
}

// Type returns the Payload's type (as would be used in its YAML "type" field).
//
// This will panic if a Payload field is not populated.
func (p PayloadUnion) Type() string {
	_, typeStr, err := typeobj.Element(p)
	if err != nil {
		panic(err)
	}
	return typeStr
}

// MarshalText implements the encoding.TextMarshaler interface by returning
// the form the payload takes in the git commit message: the abbreviated
// message head, a blank line, then the YAML-encoded payload (whose leading
// "---" acts as the separator).
func (p PayloadUnion) MarshalText() ([]byte, error) {
	msgHead := abbrevCommitMessage(p.Payload().MessageHead(p.Common))
	msgBodyB, err := yaml.Marshal(p)
	if err != nil {
		return nil, fmt.Errorf("marshaling payload %+v as yaml: %w", p, err)
	}
	w := new(bytes.Buffer)
	w.WriteString(msgHead)
	w.WriteString("\n\n---\n")
	w.Write(msgBodyB)
	return w.Bytes(), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface by decoding a
// payload object which has been encoded into a git commit message.
func (p *PayloadUnion) UnmarshalText(msg []byte) error {
	// everything after the first newline is the YAML body; the "---" document
	// separator makes it parse cleanly even with the blank line included.
	i := bytes.Index(msg, []byte("\n"))
	if i < 0 {
		return fmt.Errorf("commit message %q is malformed, it has no body", msg)
	}
	msgBody := msg[i:]
	if err := yaml.Unmarshal(msgBody, p); err != nil {
		return fmt.Errorf("unmarshaling commit payload from yaml: %w", err)
	}
	return nil
}
// AccreditPayload returns the given PayloadUnion with an appended Credential
// produced by the given Signifier, which signs the payload's current
// Fingerprint against the filesystem state at HEAD.
func (proj *Project) AccreditPayload(payUn PayloadUnion, sig sigcred.Signifier) (PayloadUnion, error) {
	headFS, err := proj.headFS()
	if err != nil {
		return payUn, fmt.Errorf("retrieving HEAD fs: %w", err)
	}

	cred, err := sig.Sign(headFS, payUn.Common.Fingerprint)
	if err != nil {
		return payUn, fmt.Errorf("signing fingerprint %q: %w", payUn.Common.Fingerprint, err)
	}

	payUn.Common.Credentials = append(payUn.Common.Credentials, cred)
	return payUn, nil
}
// CommitDirectParams are the parameters to the CommitDirect method. All are
// required, unless otherwise noted.
type CommitDirectParams struct {
	// PayloadUnion is encoded into the commit's message.
	PayloadUnion PayloadUnion
	// Author is used as both the git author and committer name.
	Author string

	ParentHash plumbing.Hash // can be zero if the commit has no parents (Q_Q)

	// GitTree is the tree the new commit will point at.
	GitTree *object.Tree
}
// CommitDirect constructs a git commit object and stores it, returning the
// resulting Commit. This method does not interact with HEAD at all.
func (proj *Project) CommitDirect(params CommitDirectParams) (Commit, error) {
	msgB, err := params.PayloadUnion.MarshalText()
	if err != nil {
		return Commit{}, fmt.Errorf("encoding payload to message string: %w", err)
	}

	// the same signature is used for both author and committer.
	author := object.Signature{
		Name: params.Author,
		When: time.Now(),
	}
	commit := &object.Commit{
		Author:    author,
		Committer: author,
		Message:   string(msgB),
		TreeHash:  params.GitTree.Hash,
	}
	if params.ParentHash != plumbing.ZeroHash {
		commit.ParentHashes = []plumbing.Hash{params.ParentHash}
	}

	// encode the commit into the repo's object storage directly, then re-read
	// it back out as a full Commit.
	commitObj := proj.GitRepo.Storer.NewEncodedObject()
	if err := commit.Encode(commitObj); err != nil {
		return Commit{}, fmt.Errorf("encoding commit object: %w", err)
	}
	commitHash, err := proj.GitRepo.Storer.SetEncodedObject(commitObj)
	if err != nil {
		return Commit{}, fmt.Errorf("setting encoded object: %w", err)
	}
	return proj.GetCommit(commitHash)
}
// Commit uses the given PayloadUnion to create a git commit object and commits
// it to the current HEAD, returning the full Commit. The commit's tree is
// taken from the currently staged changes, its author string is derived from
// the payload's credential IDs, and the branch HEAD points at is advanced to
// the new commit.
func (proj *Project) Commit(payUn PayloadUnion) (Commit, error) {
	// resolve HEAD through any symbolic refs down to the underlying hash
	// reference (i.e. the branch ref), so it can be updated afterward.
	headRef, err := proj.TraverseReferenceChain(plumbing.HEAD, func(ref *plumbing.Reference) bool {
		return ref.Type() == plumbing.HashReference
	})
	if err != nil {
		return Commit{}, fmt.Errorf("resolving HEAD to a hash reference: %w", err)
	}
	headRefName := headRef.Name()

	headHash, err := proj.ReferenceToHash(headRefName)
	if err != nil {
		return Commit{}, fmt.Errorf("resolving ref %q (HEAD): %w", headRefName, err)
	}

	// TODO this is also used in the same way in NewCommitChange. It might make
	// sense to refactor this logic out, it might not be needed in fs at all.
	_, stagedTree, err := fs.FromStagedChangesTree(proj.GitRepo)
	if err != nil {
		return Commit{}, fmt.Errorf("getting staged changes: %w", err)
	}

	commit, err := proj.CommitDirect(CommitDirectParams{
		PayloadUnion: payUn,
		Author:       strings.Join(payUn.Common.credIDs(), ", "),
		ParentHash:   headHash,
		GitTree:      stagedTree,
	})
	if err != nil {
		return Commit{}, err
	}

	// now set the branch to this new commit
	newHeadRef := plumbing.NewHashReference(headRefName, commit.Hash)
	if err := proj.GitRepo.Storer.SetReference(newHeadRef); err != nil {
		return Commit{}, fmt.Errorf("setting reference %q to new commit hash %q: %w",
			headRefName, commit.Hash, err)
	}
	return commit, nil
}
// HasStagedChanges returns true if there are file changes which have been
// staged (e.g. via "git add").
func (proj *Project) HasStagedChanges() (bool, error) {
	w, err := proj.GitRepo.Worktree()
	if err != nil {
		return false, fmt.Errorf("retrieving worktree: %w", err)
	}

	status, err := w.Status()
	if err != nil {
		return false, fmt.Errorf("retrieving worktree status: %w", err)
	}

	// a file counts as staged if its staging state is anything besides
	// unmodified or untracked.
	for _, fileStatus := range status {
		staging := fileStatus.Staging
		if staging != git.Unmodified && staging != git.Untracked {
			return true, nil
		}
	}
	return false, nil
}
// VerifyCommits verifies that the given commits, which are presumably on the
// given branch, are gucci: each must descend from the root commit of the main
// branch and pass verifyCommit's access-control, fingerprint, and credential
// checks.
func (proj *Project) VerifyCommits(branchName plumbing.ReferenceName, commits []Commit) error {
	// this isn't strictly necessary for this method, but it helps discover bugs
	// in other parts of the code.
	if len(commits) == 0 {
		return errors.New("cannot call VerifyCommits with empty commit slice")
	}

	// First determine the root of the main branch. All commits need to be an
	// ancestor of it. If the main branch has not been created yet then there
	// might not be a root commit yet.
	var rootCommitObj *object.Commit
	mainCommit, err := proj.GetCommitByRevision(plumbing.Revision(MainRefName))
	if errors.Is(err, plumbing.ErrReferenceNotFound) {
		// main branch hasn't been created yet. The commits can only be verified
		// if they are for the main branch and they include the root commit.
		if branchName != MainRefName {
			return fmt.Errorf("cannot verify commits in branch %q when no main branch exists", branchName)
		}
		for _, commit := range commits {
			if commit.Object.NumParents() == 0 {
				rootCommitObj = commit.Object
				break
			}
		}
		if rootCommitObj == nil {
			return errors.New("root commit of main branch cannot be determined")
		}
	} else if err != nil {
		return fmt.Errorf("retrieving commit at HEAD of %q: %w", MainRefName.Short(), err)
	} else {
		// walk back from main's HEAD to its root, which must be a linear
		// (single-parent) chain.
		rootCommitObj = mainCommit.Object
		for {
			if rootCommitObj.NumParents() == 0 {
				break
			} else if rootCommitObj.NumParents() > 1 {
				return fmt.Errorf("commit %q in main branch has more than one parent", rootCommitObj.Hash)
			} else if rootCommitObj, err = rootCommitObj.Parent(0); err != nil {
				return fmt.Errorf("retrieving parent commit of %q: %w", rootCommitObj.Hash, err)
			}
		}
	}

	// We also need the HEAD of the given branch, if it exists.
	branchCommit, err := proj.GetCommitByRevision(plumbing.Revision(branchName))
	if err != nil && !errors.Is(err, plumbing.ErrReferenceNotFound) {
		return fmt.Errorf("retrieving commit at HEAD of %q: %w", branchName.Short(), err)
	}

	for i, commit := range commits {
		// It's not a requirement that the given Commits are in ancestral order,
		// but usually they are; if the previous commit is the parent of this
		// one we can skip a bunch of work.
		var parentTree *object.Tree
		var isNonFF bool
		// FIX: also require the commit to actually have a parent before
		// indexing ParentHashes[0]; a parentless commit at i > 0 previously
		// panicked here.
		if i > 0 && len(commit.Object.ParentHashes) > 0 && commits[i-1].Hash == commit.Object.ParentHashes[0] {
			parentTree = commits[i-1].TreeObject
		} else if commit.Hash == rootCommitObj.Hash {
			// looking at the root commit, assume it's ok
		} else {
			var err error
			// isAncestor folds any error into err so the two checks below can
			// be written linearly; once err is set it short-circuits.
			isAncestor := func(older, younger *object.Commit) bool {
				var isAncestor bool
				if err != nil {
					return false
				} else if isAncestor, err = older.IsAncestor(younger); err != nil {
					err = fmt.Errorf("determining if %q is an ancestor of %q: %w",
						older.Hash, younger.Hash, err)
					return false
				}
				return isAncestor
			}
			ancestorOfRoot := isAncestor(rootCommitObj, commit.Object)
			if branchCommit.Hash != plumbing.ZeroHash { // checking if the var was set
				// this could only be a nonFF if the branch actually exists.
				isNonFF = !isAncestor(branchCommit.Object, commit.Object)
			}
			if err != nil {
				return err
			} else if !ancestorOfRoot {
				return fmt.Errorf("commit %q must be direct descendant of root commit of %q (%q)",
					commit.Hash, MainRefName.Short(), rootCommitObj.Hash,
				)
			}
		}
		if err := proj.verifyCommit(branchName, commit, parentTree, isNonFF); err != nil {
			return fmt.Errorf("verifying commit %q: %w", commit.Hash, err)
		}
	}
	return nil
}
// parentTree returns the tree of the parent commit of the given commit. If the
// given commit has no parents then a bare tree is returned; merge commits
// (more than one parent) are an error.
func (proj *Project) parentTree(commitObj *object.Commit) (*object.Tree, error) {
	numParents := commitObj.NumParents()
	if numParents == 0 {
		return new(object.Tree), nil
	}
	if numParents > 1 {
		return nil, errors.New("commit has multiple parents")
	}

	parentCommitObj, err := commitObj.Parent(0)
	if err != nil {
		return nil, fmt.Errorf("getting parent commit %q: %w",
			commitObj.ParentHashes[0], err)
	}
	parentTree, err := proj.GitRepo.TreeObject(parentCommitObj.TreeHash)
	if err != nil {
		return nil, fmt.Errorf("getting parent tree object %q: %w",
			parentCommitObj.TreeHash, err)
	}
	return parentTree, nil
}
// verifyCommit fully verifies a single commit: the branch's access controls,
// the correctness of the payload's fingerprint, and all of its credentials.
//
// if parentTree is nil then it will be inferred. isNonFF indicates the commit
// is being applied as a non-fast-forward.
func (proj *Project) verifyCommit(
	branchName plumbing.ReferenceName,
	commit Commit,
	parentTree *object.Tree,
	isNonFF bool,
) error {
	if parentTree == nil {
		var err error
		if parentTree, err = proj.parentTree(commit.Object); err != nil {
			return fmt.Errorf("retrieving parent tree of commit: %w", err)
		}
	}

	// sigFS is the filesystem state against which config and signifiers are
	// resolved: the commit's own tree for the root commit, otherwise the
	// parent's tree.
	var sigFS fs.FS
	if commit.Object.NumParents() == 0 {
		sigFS = fs.FromTree(commit.TreeObject)
	} else {
		sigFS = fs.FromTree(parentTree)
	}

	cfg, err := proj.loadConfig(sigFS)
	if err != nil {
		// FIX: the root commit has no parents; indexing ParentHashes[0] for
		// the error message used to panic in that case.
		if commit.Object.NumParents() == 0 {
			return fmt.Errorf("loading config of root commit %q: %w", commit.Hash, err)
		}
		return fmt.Errorf("loading config of parent %q: %w", commit.Object.ParentHashes[0], err)
	}

	// assert access controls
	changedFiles, err := ChangedFilesBetweenTrees(parentTree, commit.TreeObject)
	if err != nil {
		return fmt.Errorf("calculating diff from tree %q to tree %q: %w",
			parentTree.Hash, commit.TreeObject.Hash, err)
	} else if len(changedFiles) > 0 && commit.Payload.Change == nil {
		return errors.New("files changed but commit is not a change commit")
	}

	pathsChanged := make([]string, len(changedFiles))
	for i := range changedFiles {
		pathsChanged[i] = changedFiles[i].Path
	}

	commitType := commit.Payload.Type()
	err = accessctl.AssertCanCommit(cfg.AccessControls, accessctl.CommitRequest{
		Type:           commitType,
		Branch:         branchName.Short(),
		Credentials:    commit.Payload.Common.Credentials,
		FilesChanged:   pathsChanged,
		NonFastForward: isNonFF,
	})
	if err != nil {
		return fmt.Errorf("asserting access controls: %w", err)
	}

	// ensure the fingerprint is what it's expected to be
	storedFingerprint := commit.Payload.Common.Fingerprint
	expectedFingerprint, err := commit.Payload.Payload().Fingerprint(changedFiles)
	if err != nil {
		return fmt.Errorf("calculating expected payload fingerprint: %w", err)
	} else if expectedFingerprint == nil {
		// the payload doesn't have a fingerprint of its own, it's just carrying
		// one, so no point in checking if it's "correct".
	} else if !bytes.Equal(storedFingerprint, expectedFingerprint) {
		return fmt.Errorf("unexpected fingerprint in payload, is %q but should be %q",
			storedFingerprint, yamlutil.Blob(expectedFingerprint))
	}

	// verify all credentials
	for _, cred := range commit.Payload.Common.Credentials {
		if cred.AccountID == "" {
			// credentials without an account verify themselves.
			if err := cred.SelfVerify(storedFingerprint); err != nil {
				return fmt.Errorf("verifying credential %+v: %w", cred, err)
			}
		} else {
			sig, err := proj.signifierForCredential(sigFS, cred)
			if err != nil {
				return fmt.Errorf("finding signifier for credential %+v: %w", cred, err)
			} else if err := sig.Verify(sigFS, storedFingerprint, cred); err != nil {
				return fmt.Errorf("verifying credential %+v: %w", cred, err)
			}
		}
	}
	return nil
}
// LastChangeDescription iterates over the given commits in reverse order and
// returns the first change description it comes across. A change description
// may come from a change payload or a credential payload which covers a set of
// changes.
//
// This function will return an error if no given commits contain a change
// description.
func LastChangeDescription(commits []Commit) (string, error) {
	for i := len(commits) - 1; i >= 0; i-- {
		payUn := commits[i].Payload
		switch {
		case payUn.Change != nil:
			return payUn.Change.Description, nil
		case payUn.Credential != nil && payUn.Credential.ChangeDescription != "":
			return payUn.Credential.ChangeDescription, nil
		}
	}
	return "", errors.New("no commits in range contain a change description")
}
// changeRangeInfo collects information about a range of commits' changes; it
// is produced by (*Project).changeRangeInfo.
type changeRangeInfo struct {
	// changeCommits are the commits in the range which have a change payload.
	changeCommits []Commit
	// authors is the set of account IDs credited on those change commits.
	authors map[string]struct{}
	// startTree/endTree bound the total diff covered by the range.
	startTree, endTree *object.Tree
	// changeDescription is the last change description found in the range.
	changeDescription string
}
// changeRangeInfo returns various pieces of information about a range of
// commits' changes.
func (proj *Project) changeRangeInfo(commits []Commit) (changeRangeInfo, error) {
	info := changeRangeInfo{
		authors: map[string]struct{}{},
	}

	// collect only the change commits, and every account credited on them.
	for _, commit := range commits {
		if commit.Payload.Change != nil {
			info.changeCommits = append(info.changeCommits, commit)
			for _, cred := range commit.Payload.Common.Credentials {
				info.authors[cred.AccountID] = struct{}{}
			}
		}
	}
	if len(info.changeCommits) == 0 {
		return changeRangeInfo{}, errors.New("no change commits found in range")
	}

	// startTree has to be the tree of the parent of the first commit, which
	// isn't included in commits. Determine it the hard way.
	var err error
	if info.startTree, err = proj.parentTree(commits[0].Object); err != nil {
		return changeRangeInfo{}, fmt.Errorf("getting tree of parent of %q: %w",
			commits[0].Hash, err)
	} else if info.changeDescription, err = LastChangeDescription(commits); err != nil {
		return changeRangeInfo{}, err
	}

	// the end of the range is the tree of the last change commit.
	lastChangeCommit := info.changeCommits[len(info.changeCommits)-1]
	info.endTree = lastChangeCommit.TreeObject
	return info, nil
}
// changeFingerprint computes the fingerprint of the range's total set of file
// changes (the diff between startTree and endTree) combined with the given
// change description, using the default hash algorithm.
func (info changeRangeInfo) changeFingerprint(descr string) ([]byte, error) {
	changedFiles, err := ChangedFilesBetweenTrees(info.startTree, info.endTree)
	if err != nil {
		return nil, fmt.Errorf("calculating diff of commit trees %q and %q: %w",
			info.startTree.Hash, info.endTree.Hash, err)
	}
	return genChangeFingerprint(nil, descr, changedFiles), nil
}
// VerifyCanSetBranchHEADTo is used to verify that a branch's HEAD can be set to
// the given hash. It verifies any new commits which are being added, and
// handles verifying non-fast-forward commits as well.
//
// If the given hash matches the current HEAD of the branch then this performs
// no further checks and returns nil.
func (proj *Project) VerifyCanSetBranchHEADTo(branchName plumbing.ReferenceName, hash plumbing.Hash) error {
	oldCommitRef, err := proj.GitRepo.Reference(branchName, true)
	if errors.Is(err, plumbing.ErrReferenceNotFound) {
		// if the branch is being created then just pull all of its commits and
		// verify them.
		// TODO optimize this so that it tries to use the merge-base with main,
		// so we're not re-verifying a ton of commits unnecessarily
		commits, err := proj.GetCommitRange(plumbing.ZeroHash, hash)
		if err != nil {
			return fmt.Errorf("retrieving %q and all its ancestors: %w", hash, err)
		}
		return proj.VerifyCommits(branchName, commits)
	} else if err != nil {
		return fmt.Errorf("resolving branch reference to a hash: %w", err)
	} else if oldCommitRef.Hash() == hash {
		// if the HEAD is already at the given hash then it must be fine.
		return nil
	}

	oldCommitObj, err := proj.GitRepo.CommitObject(oldCommitRef.Hash())
	if err != nil {
		return fmt.Errorf("retrieving commit object %q: %w", oldCommitRef.Hash(), err)
	}

	newCommit, err := proj.GetCommit(hash)
	if err != nil {
		return fmt.Errorf("retrieving commit %q: %w", hash, err)
	}

	if isAncestor, err := newCommit.Object.IsAncestor(oldCommitObj); err != nil {
		return fmt.Errorf("determining if %q is an ancestor of %q: %w",
			newCommit.Hash, oldCommitObj.Hash, err)
	} else if isAncestor {
		// if the new commit is an ancestor of the old one then the branch is
		// being force-pushed to a previous commit. This is weird to handle
		// using VerifyCommits, so just call verifyCommit directly.
		return proj.verifyCommit(branchName, newCommit, nil, true)
	}

	mbCommits, err := oldCommitObj.MergeBase(newCommit.Object)
	if err != nil {
		return fmt.Errorf("determining merge-base between %q and %q: %w",
			oldCommitObj.Hash, newCommit.Hash, err)
	} else if len(mbCommits) == 0 {
		return fmt.Errorf("%q and %q have no ancestors in common",
			oldCommitObj.Hash, newCommit.Hash)
	} else if len(mbCommits) > 1 {
		// BUGFIX: go-git's MergeBase can return more than two merge-bases for
		// criss-cross histories; the previous `== 2` check would have silently
		// accepted three or more and proceeded with an arbitrary one.
		return fmt.Errorf("%q and %q have more than one ancestor in common",
			oldCommitObj.Hash, newCommit.Hash)
	}

	// the hash is a non-fast-forward of the branch HEAD; verify the commits
	// from the (single) merge-base up to the hash.
	commits, err := proj.GetCommitRange(mbCommits[0].Hash, hash)
	if err != nil {
		return fmt.Errorf("retrieving commits %q to %q: %w", mbCommits[0].Hash, hash, err)
	}
	return proj.VerifyCommits(branchName, commits)
}

@ -0,0 +1,176 @@
package dehub
import (
"bytes"
"errors"
"fmt"
"sort"
"strings"
"dehub.dev/src/dehub.git/fs"
"dehub.dev/src/dehub.git/sigcred"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
)
// PayloadChange describes the structure of a change payload.
type PayloadChange struct {
	// Description is the human-readable description of the change set.
	Description string `yaml:"description"`

	// LegacyMessage is no longer used, use Description instead. It is kept
	// only so that old payloads which used the "message" field still decode;
	// UnmarshalYAML folds it into Description.
	LegacyMessage string `yaml:"message,omitempty"`
}

// assert that PayloadChange implements the Payload interface.
var _ Payload = PayloadChange{}
// NewPayloadChange constructs a PayloadUnion populated with a PayloadChange
// encompassing the currently staged file changes. The Credentials of the
// returned PayloadUnion will _not_ be filled in.
func (proj *Project) NewPayloadChange(description string) (PayloadUnion, error) {
	// When HEAD doesn't resolve yet, fall back to an empty tree so that every
	// staged file counts as changed.
	headTree := new(object.Tree)
	head, err := proj.GetHeadCommit()
	switch {
	case err == nil:
		headTree = head.TreeObject
	case errors.Is(err, ErrHeadIsZero):
		// keep the empty headTree
	default:
		return PayloadUnion{}, fmt.Errorf("getting HEAD commit: %w", err)
	}

	_, stagedTree, err := fs.FromStagedChangesTree(proj.GitRepo)
	if err != nil {
		return PayloadUnion{}, err
	}

	changed, err := ChangedFilesBetweenTrees(headTree, stagedTree)
	if err != nil {
		return PayloadUnion{}, fmt.Errorf("calculating diff between HEAD and staged changes: %w", err)
	}

	payCh := PayloadChange{Description: description}
	fingerprint, err := payCh.Fingerprint(changed)
	if err != nil {
		return PayloadUnion{}, err
	}

	return PayloadUnion{
		Change: &payCh,
		Common: PayloadCommon{Fingerprint: fingerprint},
	}, nil
}
// MessageHead implements the method for the Payload interface. For a change
// payload the message head is simply the change's description.
func (payCh PayloadChange) MessageHead(_ PayloadCommon) string {
	return payCh.Description
}
// Fingerprint implements the method for the Payload interface. The
// fingerprint of a change covers both its description and the set of changed
// files.
func (payCh PayloadChange) Fingerprint(changedFiles []ChangedFile) ([]byte, error) {
	fingerprint := genChangeFingerprint(nil, payCh.Description, changedFiles)
	return fingerprint, nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface. It decodes the
// payload's fields normally, except that the deprecated "message" field, if
// set, is folded into the Description field.
func (payCh *PayloadChange) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// decode into a wrapper struct whose inline field carries the actual
	// PayloadChange fields.
	var wrap struct {
		Inner PayloadChange `yaml:",inline"`
	}
	if err := unmarshal(&wrap); err != nil {
		return err
	}
	*payCh = wrap.Inner
	// migrate the deprecated "message" field into Description, so the rest of
	// the codebase only ever has to look at Description.
	if payCh.LegacyMessage != "" {
		payCh.Description = payCh.LegacyMessage
		payCh.LegacyMessage = ""
	}
	return nil
}
// CombinePayloadChanges takes all changes in the given range, combines them
// into a single PayloadChange, and commits it. The resulting payload will have
// the same message as the latest change payload in the range. If the
// fingerprint of the PayloadChange produced by this method has any matching
// Credentials in the range, those will be included in the payload as well.
//
// The combined commit is committed to the project with the given revision as
// its parent. If the diff across the given range and the diff from onto to the
// end of the range are different then this will return an error.
func (proj *Project) CombinePayloadChanges(commits []Commit, onto plumbing.ReferenceName) (Commit, error) {
	info, err := proj.changeRangeInfo(commits)
	if err != nil {
		return Commit{}, err
	}

	// fingerprint of the squashed range, start->end.
	commitsFingerprint, err := info.changeFingerprint(info.changeDescription)
	if err != nil {
		return Commit{}, err
	}

	// collect the credentialing authors into a sorted, deterministic list;
	// they become the combined commit's author string.
	authors := make([]string, 0, len(info.authors))
	for author := range info.authors {
		authors = append(authors, author)
	}
	sort.Strings(authors)

	ontoBranchName, err := proj.ReferenceToBranchName(onto)
	if err != nil {
		return Commit{}, fmt.Errorf("resolving %q into a branch name: %w", onto, err)
	}

	// now determine the change hash from onto->end, to ensure that it remains
	// the same as from start->end
	ontoCommit, err := proj.GetCommitByRevision(plumbing.Revision(onto))
	if err != nil {
		return Commit{}, fmt.Errorf("resolving revision %q: %w", onto, err)
	}
	ontoEndChangedFiles, err := ChangedFilesBetweenTrees(ontoCommit.TreeObject, info.endTree)
	if err != nil {
		return Commit{}, fmt.Errorf("calculating file changes between %q and %q: %w",
			ontoCommit.Hash, commits[len(commits)-1].Hash, err)
	}

	ontoEndChangeFingerprint := genChangeFingerprint(nil, info.changeDescription, ontoEndChangedFiles)
	if !bytes.Equal(ontoEndChangeFingerprint, commitsFingerprint) {
		// TODO figure out what files to show as being the "problem files" in
		// the error message
		return Commit{}, fmt.Errorf("combining onto %q would produce a different change fingerprint, aborting combine", onto.Short())
	}

	// carry over every credential in the range which accredits the combined
	// fingerprint.
	var creds []sigcred.CredentialUnion
	for _, commit := range commits {
		if bytes.Equal(commit.Payload.Common.Fingerprint, commitsFingerprint) {
			creds = append(creds, commit.Payload.Common.Credentials...)
		}
	}

	// this is mostly to make tests easier
	sort.Slice(creds, func(i, j int) bool {
		return creds[i].AccountID < creds[j].AccountID
	})

	payUn := PayloadUnion{
		Change: &PayloadChange{
			Description: info.changeDescription,
		},
		Common: PayloadCommon{
			Fingerprint: commitsFingerprint,
			Credentials: creds,
		},
	}

	commit, err := proj.CommitDirect(CommitDirectParams{
		PayloadUnion: payUn,
		Author:       strings.Join(authors, ","),
		ParentHash:   ontoCommit.Hash,
		GitTree:      info.endTree,
	})
	if err != nil {
		return Commit{}, fmt.Errorf("storing commit: %w", err)
	}

	// set the onto branch to this new commit
	newHeadRef := plumbing.NewHashReference(ontoBranchName, commit.Hash)
	if err := proj.GitRepo.Storer.SetReference(newHeadRef); err != nil {
		return Commit{}, fmt.Errorf("setting reference %q to new commit hash %q: %w",
			ontoBranchName, commit.Hash, err)
	}
	return commit, nil
}

@ -0,0 +1,183 @@
package dehub
import (
"reflect"
"strings"
"testing"
"github.com/davecgh/go-spew/spew"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// TestPayloadChangeVerify asserts that change commits verify successfully,
// that their git commit message starts with the expected head, and that the
// message round-trips back into the same payload.
func TestPayloadChangeVerify(t *testing.T) {
	// step describes a single change commit to make within a test case.
	type step struct {
		descr   string
		msgHead string // defaults to msg
		tree    map[string]string
	}

	testCases := []struct {
		descr string
		steps []step
	}{
		{
			descr: "single commit",
			steps: []step{
				{
					descr: "first commit",
					tree:  map[string]string{"a": "0", "b": "1"},
				},
			},
		},
		{
			descr: "multiple commits",
			steps: []step{
				{
					descr: "first commit",
					tree:  map[string]string{"a": "0", "b": "1"},
				},
				{
					descr: "second commit, changing a",
					tree:  map[string]string{"a": "1"},
				},
				{
					descr: "third commit, empty",
				},
				{
					descr: "fourth commit, adding c, removing b",
					tree:  map[string]string{"b": "", "c": "2"},
				},
			},
		},
		{
			// exercises how descriptions with various newline placements are
			// rendered into the commit message head.
			descr: "big body commits",
			steps: []step{
				{
					descr: "first commit, single line but with newline\n",
				},
				{
					descr:   "second commit, single line but with two newlines\n\n",
					msgHead: "second commit, single line but with two newlines\n\n",
				},
				{
					descr:   "third commit, multi-line with one newline\nanother line!",
					msgHead: "third commit, multi-line with one newline\n\n",
				},
				{
					descr:   "fourth commit, multi-line with two newlines\n\nanother line!",
					msgHead: "fourth commit, multi-line with two newlines\n\n",
				},
			},
		},
	}

	for _, test := range testCases {
		t.Run(test.descr, func(t *testing.T) {
			h := newHarness(t)
			rootSig := h.stageNewAccount("root", false)
			for _, step := range test.steps {
				h.stage(step.tree)
				commit := h.assertCommitChange(verifyShouldSucceed, step.descr, rootSig)
				// by default the message head is the trimmed description plus
				// a blank line.
				if step.msgHead == "" {
					step.msgHead = strings.TrimSpace(step.descr) + "\n\n"
				}
				if !strings.HasPrefix(commit.Object.Message, step.msgHead) {
					t.Fatalf("commit message %q does not start with expected head %q",
						commit.Object.Message, step.msgHead)
				}

				// the commit message must unmarshal back into the exact same
				// payload which was committed.
				var payUn PayloadUnion
				if err := payUn.UnmarshalText([]byte(commit.Object.Message)); err != nil {
					t.Fatalf("error unmarshaling commit message: %v", err)
				} else if !reflect.DeepEqual(payUn, commit.Payload) {
					t.Fatalf("returned change payload:\n%s\ndoes not match actual one:\n%s",
						spew.Sdump(commit.Payload), spew.Sdump(payUn))
				}
			}
		})
	}
}
// TestCombinePayloadChanges exercises CombinePayloadChanges: a change commit
// and a separate credential commit from a second account are combined onto
// main, and the result must carry both credentials and become main's HEAD.
func TestCombinePayloadChanges(t *testing.T) {
	h := newHarness(t)

	// commit initial config, so the root user can modify it in the next commit
	rootSig := h.stageNewAccount("root", false)
	h.assertCommitChange(verifyShouldSucceed, "initial commit", rootSig)

	// add a toot user and modify the access controls such that both accounts
	// are required for the main branch
	tootSig := h.stageNewAccount("toot", false)
	h.stageAccessControls(`
- action: allow
  filters:
  - type: branch
    pattern: main
  - type: payload_type
    payload_type: change
  - type: signature
    any_account: true
    count: 2
- action: allow
  filters:
  - type: not
    filter:
      type: branch
      pattern: main
  - type: signature
    any_account: true
    count: 1
`)
	tootCommit := h.assertCommitChange(verifyShouldSucceed, "add toot", rootSig)

	// make a single change commit in another branch using root. Then add a
	// credential using toot, and combine them onto main.
	otherBranch := plumbing.NewBranchReferenceName("other")
	h.checkout(otherBranch)
	h.stage(map[string]string{"foo": "bar"})
	fooCommit := h.assertCommitChange(verifyShouldSucceed, "add foo file", rootSig)

	// now adding a credential commit from toot should work
	credCommitPayUn, err := h.proj.NewPayloadCredential(fooCommit.Payload.Common.Fingerprint)
	if err != nil {
		t.Fatal(err)
	}
	credCommit := h.tryCommit(verifyShouldSucceed, credCommitPayUn, tootSig)

	allCommits, err := h.proj.GetCommitRange(tootCommit.Hash, credCommit.Hash)
	if err != nil {
		t.Fatalf("getting commits: %v", err)
	}

	combinedCommit, err := h.proj.CombinePayloadChanges(allCommits, MainRefName)
	if err != nil {
		t.Fatal(err)
	}

	// that new commit should have both credentials
	creds := combinedCommit.Payload.Common.Credentials
	if len(creds) != 2 {
		t.Fatalf("combined commit has %d credentials, not 2", len(creds))
	} else if creds[0].AccountID != "root" {
		t.Fatalf("combined commit first credential should be from root, is from %q", creds[0].AccountID)
	} else if creds[1].AccountID != "toot" {
		t.Fatalf("combined commit second credential should be from toot, is from %q", creds[1].AccountID)
	}

	// double check that the HEAD commit of main got properly set
	h.checkout(MainRefName)
	mainHead, err := h.proj.GetHeadCommit()
	if err != nil {
		t.Fatal(err)
	} else if mainHead.Hash != combinedCommit.Hash {
		t.Fatalf("mainHead's should be pointed at %s but is pointed at %s",
			combinedCommit.Hash, mainHead.Hash)
	} else if err = h.proj.VerifyCommits(MainRefName, []Commit{combinedCommit}); err != nil {
		t.Fatalf("unable to verify combined commit: %v", err)
	} else if author := combinedCommit.Object.Author.Name; author != "root" {
		t.Fatalf("unexpected author value %q", author)
	}
}

@ -0,0 +1,39 @@
package dehub
import (
"errors"
)
// PayloadComment describes the structure of a comment payload.
type PayloadComment struct {
	// Comment is the freeform text of the comment.
	Comment string `yaml:"comment"`
}

// assert that PayloadComment implements the Payload interface.
var _ Payload = PayloadComment{}
// NewPayloadComment constructs a PayloadUnion populated with a PayloadComment.
// The Credentials of the returned PayloadUnion will _not_ be filled in.
func (proj *Project) NewPayloadComment(comment string) (PayloadUnion, error) {
	payCom := PayloadComment{Comment: comment}
	fingerprint, err := payCom.Fingerprint(nil)
	if err != nil {
		return PayloadUnion{}, err
	}
	payUn := PayloadUnion{
		Comment: &payCom,
		Common:  PayloadCommon{Fingerprint: fingerprint},
	}
	return payUn, nil
}
// MessageHead implements the method for the Payload interface. The comment
// text is wrapped in double-quotes to form the message head.
func (payCom PayloadComment) MessageHead(_ PayloadCommon) string {
	return "\"" + payCom.Comment + "\""
}
// Fingerprint implements the method for the Payload interface. Comments never
// carry file changes, so any changed files are treated as an error.
func (payCom PayloadComment) Fingerprint(changes []ChangedFile) ([]byte, error) {
	if len(changes) != 0 {
		return nil, errors.New("PayloadComment cannot have any changed files")
	}
	return genCommentFingerprint(nil, payCom.Comment), nil
}

@ -0,0 +1,82 @@
package dehub
import (
"errors"
)
// PayloadCredential describes the structure of a credential payload.
type PayloadCredential struct {
	// CommitHashes represents the commits which this credential is accrediting.
	// It is only present for informational purposes, as commits do not have
	// any bearing on the CredentialedHash itself.
	CommitHashes []string `yaml:"commits,omitempty"`

	// ChangeDescription represents the description which has been credentialed.
	// This field is only relevant if the Credential in the payload is for a
	// change set.
	ChangeDescription string `yaml:"change_description"`
}

// assert that PayloadCredential implements the Payload interface.
var _ Payload = PayloadCredential{}
// NewPayloadCredential constructs and returns a PayloadUnion populated with a
// PayloadCredential for the given fingerprint. The Credentials of the returned
// PayloadUnion will _not_ be filled in.
func (proj *Project) NewPayloadCredential(fingerprint []byte) (PayloadUnion, error) {
	payUn := PayloadUnion{
		Credential: new(PayloadCredential),
		Common:     PayloadCommon{Fingerprint: fingerprint},
	}
	return payUn, nil
}
// NewPayloadCredentialFromChanges constructs and returns a PayloadUnion
// populated with a PayloadCredential. The fingerprint of the payload will be a
// change fingerprint generated from the given description and all changes in
// the given range of Commits.
//
// If an empty description is given then the description of the last change
// payload in the range is used when generating the fingerprint.
func (proj *Project) NewPayloadCredentialFromChanges(descr string, commits []Commit) (PayloadUnion, error) {
	info, err := proj.changeRangeInfo(commits)
	if err != nil {
		return PayloadUnion{}, err
	}

	// fall back to the description of the last change payload in the range.
	if descr == "" {
		descr = info.changeDescription
	}

	fingerprint, err := info.changeFingerprint(descr)
	if err != nil {
		return PayloadUnion{}, err
	}

	payUn, err := proj.NewPayloadCredential(fingerprint)
	if err != nil {
		return PayloadUnion{}, err
	}
	payUn.Credential.ChangeDescription = descr

	// record which commits are being accredited; these are informational only.
	hashes := make([]string, 0, len(info.changeCommits))
	for _, commit := range info.changeCommits {
		hashes = append(hashes, commit.Hash.String())
	}
	payUn.Credential.CommitHashes = hashes
	return payUn, nil
}
// MessageHead implements the method for the Payload interface. The head
// identifies the fingerprint being credentialed.
func (payCred PayloadCredential) MessageHead(common PayloadCommon) string {
	head := "Credential of " + common.Fingerprint.String()
	return head
}
// Fingerprint implements the method for the Payload interface. A credential
// cannot carry file changes, and it cannot compute its own fingerprint; the
// credentialed fingerprint is stored in the payload's common fields instead,
// so nil is returned here.
func (payCred PayloadCredential) Fingerprint(changes []ChangedFile) ([]byte, error) {
	if len(changes) != 0 {
		return nil, errors.New("PayloadCredential cannot have any changed files")
	}
	return nil, nil
}

@ -0,0 +1,50 @@
package dehub
import (
"testing"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// TestPayloadCredentialVerify asserts that credential commits are subject to
// branch-scoped access controls: an account allowed only on one branch can
// commit a credential there but not elsewhere.
func TestPayloadCredentialVerify(t *testing.T) {
	h := newHarness(t)
	rootSig := h.stageNewAccount("root", false)

	// create a new account and modify the config so that that account is only
	// allowed to add verifications to a single branch
	tootSig := h.stageNewAccount("toot", false)
	tootBranch := plumbing.NewBranchReferenceName("toot_branch")
	h.stageAccessControls(`
- action: allow
  filters:
  - type: branch
    pattern: ` + tootBranch.Short() + `
  - type: signature
    count: 1
    account_ids:
    - root
    - toot
- action: allow
  filters:
  - type: signature
    count: 1
    account_ids:
    - root
`)
	rootGitCommit := h.assertCommitChange(verifyShouldSucceed, "initial commit", rootSig)

	// toot user wants to create a credential commit for the root commit, for
	// whatever reason.
	rootChangeFingerprint := rootGitCommit.Payload.Common.Fingerprint
	credCommitPayUn, err := h.proj.NewPayloadCredential(rootChangeFingerprint)
	if err != nil {
		t.Fatalf("creating credential commit for fingerprint %x: %v", rootChangeFingerprint, err)
	}

	// on the default branch toot isn't allowed to commit, so this must fail.
	h.tryCommit(verifyShouldFail, credCommitPayUn, tootSig)

	// toot tries again in their own branch, and should be allowed.
	h.checkout(tootBranch)
	h.tryCommit(verifyShouldSucceed, credCommitPayUn, tootSig)
}

@ -0,0 +1,452 @@
package dehub
import (
"errors"
"regexp"
"testing"
"dehub.dev/src/dehub.git/accessctl"
"dehub.dev/src/dehub.git/sigcred"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// TestConfigChange asserts that changes to the project configuration are
// themselves subject to verification: a new account can't add itself, but an
// existing allowed account can add it, after which the new account can commit.
func TestConfigChange(t *testing.T) {
	h := newHarness(t)
	rootSig := h.stageNewAccount("root", false)

	var verified []Commit

	// commit the initial staged changes, which merely include the config and
	// public key
	initCommit := h.assertCommitChange(verifyShouldSucceed, "commit configuration", rootSig)
	verified = append(verified, initCommit)

	// create a new account and add it to the configuration. That commit should
	// not be verifiable, though
	tootSig := h.stageNewAccount("toot", false)
	h.stageCfg()
	h.assertCommitChange(verifyShouldFail, "add toot user", tootSig)

	// now add with the root user, this should work.
	h.stageCfg()
	addCommit := h.assertCommitChange(verifyShouldSucceed, "add toot user", rootSig)
	verified = append(verified, addCommit)

	// _now_ the toot user should be able to do things.
	h.stage(map[string]string{"foo/bar": "what a cool file"})
	coolCommit := h.assertCommitChange(verifyShouldSucceed, "add a cool file", tootSig)
	verified = append(verified, coolCommit)

	if err := h.proj.VerifyCommits(MainRefName, verified); err != nil {
		t.Fatal(err)
	}
}
// TestMainAncestryRequirement asserts that commits on branches other than
// main are rejected unless they descend from main's history.
func TestMainAncestryRequirement(t *testing.T) {
	other := plumbing.NewBranchReferenceName("other")

	t.Run("empty repo", func(t *testing.T) {
		h := newHarness(t)
		rootSig := h.stageNewAccount("root", false)
		h.checkout(other)
		// stage and try to add to the "other" branch, it shouldn't work though
		h.stageCfg()
		h.assertCommitChange(verifyShouldFail, "starting new branch at other", rootSig)
	})

	t.Run("new branch, single commit", func(t *testing.T) {
		h := newHarness(t)
		rootSig := h.stageNewAccount("root", false)
		h.assertCommitChange(verifyShouldSucceed, "add cfg", rootSig)

		// point HEAD at a branch which doesn't actually exist
		headRef := plumbing.NewSymbolicReference(plumbing.HEAD, other)
		if err := h.proj.GitRepo.Storer.SetReference(headRef); err != nil {
			h.t.Fatal(err)
		}

		h.stageCfg()
		h.assertCommitChange(verifyShouldFail, "starting new branch at other", rootSig)
	})
}
// TestAnonymousCommits asserts that an account-less ("anonymous") signifier
// can commit when the access controls allow any signature.
func TestAnonymousCommits(t *testing.T) {
	h := newHarness(t)
	anonSig := h.stageNewAccount("anon", true)
	h.stageAccessControls(`
- action: allow
  filters:
  - type: signature
    any: true
`)
	h.assertCommitChange(verifyShouldSucceed, "this will work", anonSig)
}
// TestNonFastForwardCommits asserts that non-fast-forward commits are denied
// by default and permitted once an access control with the
// non_fast_forward commit attribute allows them.
func TestNonFastForwardCommits(t *testing.T) {
	h := newHarness(t)
	rootSig := h.stageNewAccount("root", false)
	initCommit := h.assertCommitChange(verifyShouldSucceed, "init", rootSig)

	// add another commit
	h.stage(map[string]string{"foo": "foo"})
	fooCommit := h.assertCommitChange(verifyShouldSucceed, "foo", rootSig)

	// commitOn points HEAD at the given hash and makes an accredited change
	// commit on top of it, bypassing the harness's verification helpers.
	commitOn := func(hash plumbing.Hash, msg string) Commit {
		ref := plumbing.NewHashReference(plumbing.HEAD, hash)
		if err := h.proj.GitRepo.Storer.SetReference(ref); err != nil {
			h.t.Fatal(err)
		} else if commitChange, err := h.proj.NewPayloadChange("bar"); err != nil {
			h.t.Fatal(err)
		} else if commitChange, err = h.proj.AccreditPayload(commitChange, rootSig); err != nil {
			h.t.Fatal(err)
		} else if gitCommit, err := h.proj.Commit(commitChange); err != nil {
			h.t.Fatal(err)
		} else {
			return gitCommit
		}
		panic("can't get here")
	}

	// checkout initCommit directly, make a new commit on top of it, and try to
	// verify that (this is too fancy for the harness, must be done manually).
	h.stage(map[string]string{"bar": "bar"})
	barCommit := commitOn(initCommit.Hash, "bar")
	err := h.proj.VerifyCommits(MainRefName, []Commit{barCommit})
	if !errors.As(err, new(accessctl.ErrCommitRequestDenied)) {
		h.t.Fatalf("expected ErrCommitRequestDenied, got: %v", err)
	}

	// check main back out (fooCommit should be checked out), and modify the
	// config to allow nonFF commits, and add another bogus commit on top.
	h.checkout(MainRefName)
	h.stageAccessControls(`
- action: allow
  filters:
  - type: commit_attributes
    non_fast_forward: true`)
	h.stageCfg()
	allowNonFFCommit := h.assertCommitChange(verifyShouldSucceed, "allow non-ff", rootSig)
	h.stage(map[string]string{"foo": "foo foo"})
	h.assertCommitChange(verifyShouldSucceed, "foo foo", rootSig)

	// checking out allowNonFFCommit directly and performing a nonFF commit
	// should work now.
	h.stage(map[string]string{"baz": "baz"})
	bazCommit := commitOn(allowNonFFCommit.Hash, "baz")
	if err = h.proj.VerifyCommits(MainRefName, []Commit{bazCommit}); err != nil {
		h.t.Fatal(err)
	}

	// verifying the full history should also work
	gitCommits := []Commit{initCommit, fooCommit, allowNonFFCommit, bazCommit}
	if err = h.proj.VerifyCommits(MainRefName, gitCommits); err != nil {
		h.t.Fatal(err)
	}
}
// TestVerifyCanSetBranchHEADTo is a table-driven test covering the various
// branch-creation, fast-forward, and non-fast-forward scenarios which
// VerifyCanSetBranchHEADTo must handle.
func TestVerifyCanSetBranchHEADTo(t *testing.T) {
	// toTest describes the VerifyCanSetBranchHEADTo call a case wants made.
	type toTest struct {
		// branchName and hash are the arguments passed into
		// VerifyCanSetBranchHEADTo.
		branchName plumbing.ReferenceName
		hash       plumbing.Hash

		// if set then the branch will have its HEAD reset to this hash prior to
		// calling VerifyCanSetBranchHEADTo.
		resetTo plumbing.Hash
	}

	type test struct {
		descr string
		// init builds the repo state for the case and says what to verify.
		init func(h *harness, rootSig sigcred.Signifier) toTest

		// If true then the verify call is expected to fail. The string is a
		// regex which should match the unwrapped error returned.
		expErr string
	}

	tests := []test{
		{
			descr: "creation of main",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				// checkout other and build on top of that, so that when
				// VerifyCanSetBranchHEADTo is called main won't exist.
				other := plumbing.NewBranchReferenceName("other")
				h.checkout(other)
				initCommit := h.assertCommitChange(verifySkip, "init", rootSig)
				return toTest{
					branchName: MainRefName,
					hash:       initCommit.Hash,
				}
			},
		},
		{
			descr: "main ff",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				initCommit := h.assertCommitChange(verifySkip, "init", rootSig)
				h.stage(map[string]string{"foo": "foo"})
				nextCommit := h.assertCommitChange(verifySkip, "next", rootSig)
				return toTest{
					branchName: MainRefName,
					hash:       nextCommit.Hash,
					resetTo:    initCommit.Hash,
				}
			},
		},
		{
			descr: "new branch, no main",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				// checkout other and build on top of that, so that when
				// VerifyCanSetBranchHEADTo is called main won't exist.
				other := plumbing.NewBranchReferenceName("other")
				h.checkout(other)
				initCommit := h.assertCommitChange(verifySkip, "init", rootSig)
				return toTest{
					branchName: plumbing.NewBranchReferenceName("other2"),
					hash:       initCommit.Hash,
				}
			},
			expErr: `^cannot verify commits in branch "refs/heads/other2" when no main branch exists$`,
		},
		{
			// this case isn't generally possible, unless someone manually
			// creates a branch in an empty repo on the remote
			descr: "existing branch, no main",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				// checkout other and build on top of that, so that when
				// VerifyCanSetBranchHEADTo is called main won't exist.
				other := plumbing.NewBranchReferenceName("other")
				h.checkout(other)
				initCommit := h.assertCommitChange(verifySkip, "init", rootSig)
				h.stage(map[string]string{"foo": "foo"})
				fooCommit := h.assertCommitChange(verifySkip, "foo", rootSig)
				return toTest{
					branchName: other,
					hash:       fooCommit.Hash,
					resetTo:    initCommit.Hash,
				}
			},
			expErr: `^cannot verify commits in branch "refs/heads/other" when no main branch exists$`,
		},
		{
			descr: "new branch, not ancestor of main",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				h.assertCommitChange(verifySkip, "init", rootSig)
				// create new branch with no HEAD, and commit on that.
				other := plumbing.NewBranchReferenceName("other")
				ref := plumbing.NewSymbolicReference(plumbing.HEAD, other)
				if err := h.proj.GitRepo.Storer.SetReference(ref); err != nil {
					t.Fatal(err)
				}
				h.stageCfg()
				h.stage(map[string]string{"foo": "foo"})
				badInitCommit := h.assertCommitChange(verifySkip, "a different init", rootSig)
				return toTest{
					branchName: plumbing.NewBranchReferenceName("other2"),
					hash:       badInitCommit.Hash,
				}
			},
			expErr: `^commit "[0-9a-f]+" must be direct descendant of root commit of "main" \("[0-9a-f]+"\)$`,
		},
		{
			// this case isn't generally possible, unless someone manually
			// creates a branch in an empty repo on the remote
			descr: "existing branch, not ancestor of main",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				h.assertCommitChange(verifySkip, "init", rootSig)
				// create new branch with no HEAD, and commit on that.
				other := plumbing.NewBranchReferenceName("other")
				ref := plumbing.NewSymbolicReference(plumbing.HEAD, other)
				if err := h.proj.GitRepo.Storer.SetReference(ref); err != nil {
					t.Fatal(err)
				}
				h.stageCfg()
				h.stage(map[string]string{"foo": "foo"})
				badInitCommit := h.assertCommitChange(verifySkip, "a different init", rootSig)
				h.stage(map[string]string{"bar": "bar"})
				barCommit := h.assertCommitChange(verifySkip, "bar", rootSig)
				return toTest{
					branchName: other,
					hash:       barCommit.Hash,
					resetTo:    badInitCommit.Hash,
				}
			},
			expErr: `^commit "[0-9a-f]+" must be direct descendant of root commit of "main" \("[0-9a-f]+"\)$`,
		},
		{
			descr: "new branch off of main",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				initCommit := h.assertCommitChange(verifySkip, "init", rootSig)
				other := plumbing.NewBranchReferenceName("other")
				h.checkout(other)
				h.stage(map[string]string{"foo": "foo"})
				fooCommit := h.assertCommitChange(verifySkip, "foo", rootSig)
				return toTest{
					branchName: other,
					hash:       fooCommit.Hash,
					resetTo:    initCommit.Hash,
				}
			},
		},
		{
			descr: "new branch off of older main commit",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				initCommit := h.assertCommitChange(verifySkip, "init", rootSig)
				h.stage(map[string]string{"foo": "foo"})
				h.assertCommitChange(verifySkip, "foo", rootSig)
				other := plumbing.NewBranchReferenceName("other")
				h.checkout(other)
				h.reset(initCommit.Hash, git.HardReset)
				h.stage(map[string]string{"bar": "bar"})
				barCommit := h.assertCommitChange(verifySkip, "bar", rootSig)
				return toTest{
					branchName: other,
					hash:       barCommit.Hash,
					resetTo:    initCommit.Hash,
				}
			},
		},
		{
			descr: "branch ff",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				h.assertCommitChange(verifySkip, "init", rootSig)
				other := plumbing.NewBranchReferenceName("other")
				h.checkout(other)
				var commits []Commit
				for _, str := range []string{"foo", "bar", "baz", "biz", "buz"} {
					h.stage(map[string]string{str: str})
					commit := h.assertCommitChange(verifySkip, str, rootSig)
					commits = append(commits, commit)
				}
				return toTest{
					branchName: other,
					hash:       commits[len(commits)-1].Hash,
					resetTo:    commits[0].Hash,
				}
			},
		},
		{
			descr: "main nonff",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				initCommit := h.assertCommitChange(verifySkip, "init", rootSig)
				h.stage(map[string]string{"foo": "foo"})
				h.assertCommitChange(verifySkip, "foo", rootSig)
				// start another branch back at init and make a new commit on it
				other := plumbing.NewBranchReferenceName("other")
				h.checkout(other)
				h.reset(initCommit.Hash, git.HardReset)
				h.stage(map[string]string{"bar": "bar"})
				barCommit := h.assertCommitChange(verifySkip, "bar", rootSig)
				return toTest{
					branchName: MainRefName,
					hash:       barCommit.Hash,
				}
			},
			expErr: `^commit matched and denied by this access control:`,
		},
		{
			descr: "branch nonff",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				h.assertCommitChange(verifySkip, "init", rootSig)
				other := plumbing.NewBranchReferenceName("other")
				h.checkout(other)
				h.stage(map[string]string{"foo": "foo"})
				fooCommit := h.assertCommitChange(verifySkip, "foo", rootSig)
				h.stage(map[string]string{"bar": "bar"})
				h.assertCommitChange(verifySkip, "bar", rootSig)
				other2 := plumbing.NewBranchReferenceName("other2")
				h.checkout(other2)
				h.reset(fooCommit.Hash, git.HardReset)
				h.stage(map[string]string{"baz": "baz"})
				bazCommit := h.assertCommitChange(verifySkip, "baz", rootSig)
				return toTest{
					branchName: other,
					hash:       bazCommit.Hash,
				}
			},
		},
		{
			descr: "branch nonff to previous commit",
			init: func(h *harness, rootSig sigcred.Signifier) toTest {
				h.assertCommitChange(verifySkip, "init", rootSig)
				other := plumbing.NewBranchReferenceName("other")
				h.checkout(other)
				h.stage(map[string]string{"foo": "foo"})
				fooCommit := h.assertCommitChange(verifySkip, "foo", rootSig)
				h.stage(map[string]string{"bar": "bar"})
				h.assertCommitChange(verifySkip, "bar", rootSig)
				return toTest{
					branchName: other,
					hash:       fooCommit.Hash,
				}
			},
		},
	}

	for _, test := range tests {
		t.Run(test.descr, func(t *testing.T) {
			h := newHarness(t)
			rootSig := h.stageNewAccount("root", false)
			toTest := test.init(h, rootSig)

			// optionally move the branch's HEAD before verifying.
			if toTest.resetTo != plumbing.ZeroHash {
				ref := plumbing.NewHashReference(toTest.branchName, toTest.resetTo)
				if err := h.proj.GitRepo.Storer.SetReference(ref); err != nil {
					t.Fatal(err)
				}
			}

			err := h.proj.VerifyCanSetBranchHEADTo(toTest.branchName, toTest.hash)
			if test.expErr == "" {
				if err != nil {
					t.Fatalf("unexpected error: %v", err)
				}
				return
			} else if err == nil {
				t.Fatal("expected verification to fail")
			}

			// fully unwrap the error before matching it against the regex.
			ogErr := err
			for {
				if unwrappedErr := errors.Unwrap(err); unwrappedErr != nil {
					err = unwrappedErr
				} else {
					break
				}
			}

			errRegex := regexp.MustCompile(test.expErr)
			if !errRegex.MatchString(err.Error()) {
				t.Fatalf("\nexpected error of form %q\nbut got: %v", test.expErr, ogErr)
			}
		})
	}
}

@ -0,0 +1,326 @@
// Package dehub implements the core functionality of dehub projects: opening
// and initializing git-backed projects, constructing commit payloads (changes,
// comments, and credentials), and verifying commits against a project's
// access controls.
package dehub
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"dehub.dev/src/dehub.git/fs"
"gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-billy.v4/memfs"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/format/config"
"gopkg.in/src-d/go-git.v4/storage"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
)
const (
	// DehubDir defines the name of the directory where all dehub-related files
	// are expected to be found within the git repo.
	DehubDir = ".dehub"
)

var (
	// ConfigPath defines the expected path to the Project's configuration file.
	ConfigPath = filepath.Join(DehubDir, "config.yml")

	// Main defines the name of the main branch.
	Main = "main"

	// MainRefName defines the reference name of the main branch.
	MainRefName = plumbing.NewBranchReferenceName(Main)
)
// openOpts holds all optional behavior flags which can be set when opening a
// project.
type openOpts struct {
	// bare indicates that the repo is expected to have no worktree.
	bare bool
}

// OpenOption is an option which can be passed to the OpenProject function to
// affect the Project's behavior.
type OpenOption func(*openOpts)
// OpenBareRepo returns an OpenOption which, if true is given, causes the
// OpenProject function to expect to open a bare git repo.
func OpenBareRepo(bare bool) OpenOption {
	return func(opts *openOpts) {
		opts.bare = bare
	}
}
// Project implements accessing and modifying a local dehub project, as well as
// extending the functionality of the underlying git repo in ways which are
// specifically useful for dehub projects.
type Project struct {
	// GitRepo is the git repository which houses the project.
	GitRepo *git.Repository

	// GitDirFS corresponds to the .git directory (or the entire repo directory
	// if it's a bare repo)
	GitDirFS billy.Filesystem
}
// extractGitDirFS pulls the billy.Filesystem backing the given storer, for
// storers (like go-git's filesystem storage) which expose one.
func extractGitDirFS(storer storage.Storer) (billy.Filesystem, error) {
	type filesystemer interface{ Filesystem() billy.Filesystem }

	fser, ok := storer.(filesystemer)
	if !ok {
		return nil, fmt.Errorf("git storage object of type %T does not expose its underlying filesystem",
			storer)
	}
	return fser.Filesystem(), nil
}
// OpenProject opens the dehub project in the given directory and returns a
// Project instance for it.
//
// The given path is expected to have a git repo already initialized.
func OpenProject(path string, options ...OpenOption) (*Project, error) {
	var opts openOpts
	for _, option := range options {
		option(&opts)
	}

	plainOpts := &git.PlainOpenOptions{
		// a non-bare repo's .git dir may live above the given path.
		DetectDotGit: !opts.bare,
	}

	gitRepo, err := git.PlainOpenWithOptions(path, plainOpts)
	if err != nil {
		return nil, fmt.Errorf("opening git repo: %w", err)
	}

	gitDirFS, err := extractGitDirFS(gitRepo.Storer)
	if err != nil {
		return nil, err
	}

	return &Project{GitRepo: gitRepo, GitDirFS: gitDirFS}, nil
}
// initOpts holds the settings accumulated by applying InitOption values.
type initOpts struct {
	// bare indicates the repo should be created without a worktree.
	bare bool
	// remote indicates the repo should be configured to be usable as a remote.
	remote bool
}

// InitOption is an option which can be passed into the Init functions to affect
// their behavior.
type InitOption func(*initOpts)
// InitBareRepo returns an InitOption which, if true is given, causes the Init
// function to initialize the project's git repo without a worktree.
func InitBareRepo(bare bool) InitOption {
	return func(opts *initOpts) { opts.bare = bare }
}
// InitRemoteRepo returns an InitOption which, if true is given, causes the Init
// function to initialize the project's git repo with certain git configuration
// options set which make the repo able to be used as a remote repo.
func InitRemoteRepo(remote bool) InitOption {
	return func(opts *initOpts) { opts.remote = remote }
}
// InitProject will initialize a new project at the given path, applying any
// given InitOptions (e.g. InitBareRepo, InitRemoteRepo) to both the git repo
// creation and the dehub-specific initialization.
func InitProject(path string, options ...InitOption) (*Project, error) {
	var opts initOpts
	for _, opt := range options {
		opt(&opts)
	}

	var proj Project
	var err error
	if proj.GitRepo, err = git.PlainInit(path, opts.bare); err != nil {
		return nil, fmt.Errorf("initializing git repo: %w", err)
	} else if proj.GitDirFS, err = extractGitDirFS(proj.GitRepo.Storer); err != nil {
		return nil, err
	} else if err = proj.init(opts); err != nil {
		return nil, fmt.Errorf("initializing repo with dehub defaults: %w", err)
	}
	return &proj, nil
}
// InitMemProject initializes an empty project which only exists in memory.
// Any failure during initialization is considered a programmer error and
// panics.
func InitMemProject(options ...InitOption) *Project {
	var opts initOpts
	for _, option := range options {
		option(&opts)
	}

	// baseFS holds both the worktree and the .git directory.
	baseFS := memfs.New()
	dotGitFS, err := baseFS.Chroot(git.GitDirName)
	if err != nil {
		panic(err)
	}
	storer := filesystem.NewStorage(dotGitFS, cache.NewObjectLRUDefault())

	// a bare repo gets no worktree at all.
	var worktree billy.Filesystem
	if !opts.bare {
		worktree = baseFS
	}

	gitRepo, err := git.Init(storer, worktree)
	if err != nil {
		panic(err)
	}

	proj := &Project{GitRepo: gitRepo, GitDirFS: dotGitFS}
	if err := proj.init(opts); err != nil {
		panic(err)
	}
	return proj
}
// initRemotePreReceive installs the dehub pre-receive hook into the repo's
// hooks directory, so that pushes to this repo are verified by `dehub hook
// pre-receive`. bare controls whether the hook invokes dehub with the -bare
// flag.
func (proj *Project) initRemotePreReceive(bare bool) error {
	if err := proj.GitDirFS.MkdirAll("hooks", 0755); err != nil {
		return fmt.Errorf("creating hooks directory: %w", err)
	}

	// 0755 because the hook must be executable by git.
	preRcvFlags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
	preRcv, err := proj.GitDirFS.OpenFile("hooks/pre-receive", preRcvFlags, 0755)
	if err != nil {
		return fmt.Errorf("opening hooks/pre-receive file: %w", err)
	}

	preRcvBody := "#!/bin/sh\nexec dehub hook pre-receive\n"
	if bare {
		preRcvBody = "#!/bin/sh\nexec dehub hook -bare pre-receive\n"
	}

	if _, err := io.WriteString(preRcv, preRcvBody); err != nil {
		preRcv.Close()
		return fmt.Errorf("writing to hooks/pre-receive: %w", err)
	}

	// the Close error is checked (rather than deferred and ignored) because on
	// some filesystems a failed Close means the write itself was lost.
	if err := preRcv.Close(); err != nil {
		return fmt.Errorf("closing hooks/pre-receive: %w", err)
	}
	return nil
}
// init performs dehub-specific initialization of a freshly created git repo:
// it points HEAD at the main branch, and, for remote-enabled repos, enables
// receivepack over http and installs the pre-receive hook.
func (proj *Project) init(opts initOpts) error {
	headRef := plumbing.NewSymbolicReference(plumbing.HEAD, MainRefName)
	if err := proj.GitRepo.Storer.SetReference(headRef); err != nil {
		return fmt.Errorf("setting HEAD reference to %q: %w", MainRefName, err)
	}

	if opts.remote {
		cfg, err := proj.GitRepo.Config()
		if err != nil {
			return fmt.Errorf("opening git cfg: %w", err)
		}

		// allow pushes over http; git disables http receivepack by default.
		cfg.Raw = cfg.Raw.AddOption("http", config.NoSubsection, "receivepack", "true")
		if err := proj.GitRepo.Storer.SetConfig(cfg); err != nil {
			return fmt.Errorf("storing modified git config: %w", err)
		}

		if err := proj.initRemotePreReceive(opts.bare); err != nil {
			return fmt.Errorf("initializing pre-receive hook for remote-enabled repo: %w", err)
		}
	}
	return nil
}
// billyFilesystem returns the filesystem backing the project's worktree.
func (proj *Project) billyFilesystem() (billy.Filesystem, error) {
	worktree, err := proj.GitRepo.Worktree()
	if err != nil {
		return nil, fmt.Errorf("opening git worktree: %w", err)
	}
	return worktree.Filesystem, nil
}
// errTraverseRefNoMatch is returned by TraverseReferenceChain when the whole
// chain is walked without the predicate ever returning true.
var errTraverseRefNoMatch = errors.New("failed to find reference matching given predicate")
// TraverseReferenceChain resolves a chain of references, calling the given
// predicate on each one, and returning the first one for which the predicate
// returns true. This method will return an error if it reaches the end of the
// chain and the predicate still has not returned true.
//
// If a reference name is encountered which does not actually exist, then it is
// assumed to be a hash reference to the zero hash.
func (proj *Project) TraverseReferenceChain(refName plumbing.ReferenceName, pred func(*plumbing.Reference) bool) (*plumbing.Reference, error) {
	// TODO check that this (and the methods which use it) are actually useful

	// maxDepth bounds the traversal so that a cycle of symbolic references
	// can't send this loop spinning forever.
	const maxDepth = 255
	for i := 0; i < maxDepth; i++ {
		ref, err := proj.GitRepo.Storer.Reference(refName)
		if errors.Is(err, plumbing.ErrReferenceNotFound) {
			// missing references are treated as hash refs to the zero hash.
			ref = plumbing.NewHashReference(refName, plumbing.ZeroHash)
		} else if err != nil {
			return nil, fmt.Errorf("resolving reference %q: %w", refName, err)
		}

		if pred(ref) {
			return ref, nil
		} else if ref.Type() != plumbing.SymbolicReference {
			// a hash reference terminates the chain.
			return nil, errTraverseRefNoMatch
		}
		refName = ref.Target()
	}
	return nil, fmt.Errorf("reference chain starting at %q is too long or contains a cycle", refName)
}
// ErrNoBranchReference is returned from ReferenceToBranchName if no reference
// in the reference chain is for a branch.
var ErrNoBranchReference = errors.New("no branch reference found")

// ReferenceToBranchName traverses a chain of references looking for the first
// branch reference, and returns that name, or returns ErrNoBranchReference if
// no branch reference is part of the chain.
func (proj *Project) ReferenceToBranchName(refName plumbing.ReferenceName) (plumbing.ReferenceName, error) {
	// a branch name needs no traversal at all.
	if refName.IsBranch() {
		return refName, nil
	}

	targetsBranch := func(ref *plumbing.Reference) bool {
		return ref.Target().IsBranch()
	}

	ref, err := proj.TraverseReferenceChain(refName, targetsBranch)
	switch {
	case errors.Is(err, errTraverseRefNoMatch):
		return "", ErrNoBranchReference
	case err != nil:
		return "", fmt.Errorf("traversing reference chain: %w", err)
	default:
		return ref.Target(), nil
	}
}
// ReferenceToHash fully resolves a reference to a hash. If a reference cannot
// be resolved then plumbing.ZeroHash is returned.
func (proj *Project) ReferenceToHash(refName plumbing.ReferenceName) (plumbing.Hash, error) {
	ref, err := proj.TraverseReferenceChain(refName, func(ref *plumbing.Reference) bool {
		return ref.Type() == plumbing.HashReference
	})
	if errors.Is(err, errTraverseRefNoMatch) {
		// every chain should terminate in a hash reference, so this shouldn't
		// be reachable.
		return plumbing.ZeroHash, errors.New("no hash in reference chain (is this even possible???)")
	} else if errors.Is(err, plumbing.ErrReferenceNotFound) {
		// NOTE(review): TraverseReferenceChain appears to translate
		// ErrReferenceNotFound into a zero-hash reference rather than
		// returning it, so this branch may be unreachable — confirm.
		return plumbing.ZeroHash, nil
	} else if err != nil {
		return plumbing.ZeroHash, fmt.Errorf("traversing reference chain: %w", err)
	}
	return ref.Hash(), nil
}
// headFS returns an FS based on the HEAD commit, or if there is no HEAD commit
// (it's an empty repo) an FS based on the raw filesystem.
func (proj *Project) headFS() (fs.FS, error) {
	head, err := proj.GetHeadCommit()
	switch {
	case errors.Is(err, ErrHeadIsZero):
		// empty repo: fall back to whatever is in the worktree.
		bfs, bfsErr := proj.billyFilesystem()
		if bfsErr != nil {
			return nil, fmt.Errorf("getting underlying filesystem: %w", bfsErr)
		}
		return fs.FromBillyFilesystem(bfs), nil
	case err != nil:
		return nil, fmt.Errorf("could not get HEAD tree: %w", err)
	default:
		return fs.FromTree(head.TreeObject), nil
	}
}

@ -0,0 +1,289 @@
package dehub
import (
"bytes"
"errors"
"io"
"math/rand"
"path/filepath"
"testing"
"dehub.dev/src/dehub.git/sigcred"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
yaml "gopkg.in/yaml.v2"
)
// harness bundles the state shared by most tests in this package: the testing
// handle, a deterministic random source, an in-memory project, and the project
// config being built up by the test.
type harness struct {
	t    *testing.T
	rand *rand.Rand
	proj *Project
	cfg  *Config
}
// newHarness constructs a harness around a fresh in-memory project. The random
// source is seeded with a constant so tests are reproducible.
func newHarness(t *testing.T) *harness {
	src := rand.NewSource(0xb4eadb01)
	return &harness{
		t:    t,
		rand: rand.New(src),
		proj: InitMemProject(),
		cfg:  new(Config),
	}
}
// stage writes the given path -> content mapping into the worktree and adds
// each path to the git index. An empty content string removes the path
// instead.
func (h *harness) stage(tree map[string]string) {
	w, err := h.proj.GitRepo.Worktree()
	if err != nil {
		h.t.Fatal(err)
	}
	fs := w.Filesystem
	for path, content := range tree {
		if content == "" {
			if _, err := w.Remove(path); err != nil {
				h.t.Fatalf("removing %q: %v", path, err)
			}
			continue
		}
		dir := filepath.Dir(path)
		// 0755, not 0666: directories need the execute bit to be traversable
		// on a real filesystem (memfs ignores the mode, but this keeps the
		// harness correct if it's ever backed by disk).
		if err := fs.MkdirAll(dir, 0755); err != nil {
			h.t.Fatalf("making directory %q: %v", dir, err)
		}
		f, err := fs.Create(path)
		if err != nil {
			h.t.Fatalf("creating file %q: %v", path, err)
		} else if _, err := io.Copy(f, bytes.NewBufferString(content)); err != nil {
			h.t.Fatalf("writing to file %q: %v", path, err)
		} else if err := f.Close(); err != nil {
			h.t.Fatalf("closing file %q: %v", path, err)
		} else if _, err := w.Add(path); err != nil {
			h.t.Fatalf("adding file %q to index: %v", path, err)
		}
	}
}
// stageCfg marshals the harness's current Config and stages it at ConfigPath.
func (h *harness) stageCfg() {
	cfgBody, err := yaml.Marshal(h.cfg)
	if err != nil {
		h.t.Fatal(err)
	}
	h.stage(map[string]string{
		ConfigPath: string(cfgBody),
	})
}
// stageNewAccount generates a new test PGP signifier for the given account ID.
// If anon is false the account (with its public key inlined) is added to the
// config and the config is staged; if anon is true only the signifier is
// returned and nothing is staged.
func (h *harness) stageNewAccount(accountID string, anon bool) sigcred.Signifier {
	sig, pubKeyBody := sigcred.TestSignifierPGP(accountID, anon, h.rand)
	if !anon {
		h.cfg.Accounts = append(h.cfg.Accounts, Account{
			ID: accountID,
			Signifiers: []sigcred.SignifierUnion{{PGPPublicKey: &sigcred.SignifierPGP{
				Body: string(pubKeyBody),
			}}},
		})
		h.stageCfg()
	}
	return sig
}
// stageAccessControls parses the given YAML into the config's access controls
// and stages the updated config.
func (h *harness) stageAccessControls(aclYAML string) {
	err := yaml.Unmarshal([]byte(aclYAML), &h.cfg.AccessControls)
	if err != nil {
		h.t.Fatal(err)
	}
	h.stageCfg()
}
// checkout checks out the given branch, creating it at the current HEAD
// commit if it doesn't already exist. On an empty repo (HEAD not resolvable)
// it only re-points HEAD at the branch.
func (h *harness) checkout(branch plumbing.ReferenceName) {
	w, err := h.proj.GitRepo.Worktree()
	if err != nil {
		h.t.Fatal(err)
	}
	head, err := h.proj.GetHeadCommit()
	if errors.Is(err, ErrHeadIsZero) {
		// if HEAD is not resolvable to any hash then the Checkout method
		// doesn't work, just set HEAD manually.
		ref := plumbing.NewSymbolicReference(plumbing.HEAD, branch)
		if err := h.proj.GitRepo.Storer.SetReference(ref); err != nil {
			h.t.Fatal(err)
		}
		return
	} else if err != nil {
		h.t.Fatal(err)
	}
	_, err = h.proj.GitRepo.Storer.Reference(branch)
	if errors.Is(err, plumbing.ErrReferenceNotFound) {
		// branch doesn't exist yet; create it at the current HEAD commit.
		err = w.Checkout(&git.CheckoutOptions{
			Hash:   head.Hash,
			Branch: branch,
			Create: true,
		})
	} else if err != nil {
		// previously this formatted the branch name where the error belongs.
		h.t.Fatalf("checking if branch already exists: %v", err)
	} else {
		err = w.Checkout(&git.CheckoutOptions{
			Branch: branch,
		})
	}
	if err != nil {
		h.t.Fatalf("checking out branch: %v", err)
	}
}
// reset performs a git reset of the worktree to the given commit hash.
func (h *harness) reset(to plumbing.Hash, mode git.ResetMode) {
	w, err := h.proj.GitRepo.Worktree()
	if err != nil {
		h.t.Fatal(err)
	}
	opts := &git.ResetOptions{Commit: to, Mode: mode}
	if err := w.Reset(opts); err != nil {
		h.t.Fatal(err)
	}
}
// verifyExpectation describes what a test expects to happen when a freshly
// created commit is run through VerifyCommits.
type verifyExpectation int

const (
	// verifyShouldSucceed indicates verification is expected to pass.
	verifyShouldSucceed verifyExpectation = 1
	// verifyShouldFail indicates verification is expected to fail.
	verifyShouldFail verifyExpectation = 0
	// verifySkip indicates verification should not be performed at all.
	verifySkip verifyExpectation = -1
)
// tryCommit accredits (when accountSig is non-nil) and commits the given
// payload, then verifies the resulting commit against the currently checked
// out branch, asserting the outcome indicated by verifyExp. When verification
// was expected to fail and did, the commit is rolled back via hard reset so
// the harness is left in a clean state.
func (h *harness) tryCommit(
	verifyExp verifyExpectation,
	payUn PayloadUnion,
	accountSig sigcred.Signifier,
) Commit {
	if accountSig != nil {
		var err error
		if payUn, err = h.proj.AccreditPayload(payUn, accountSig); err != nil {
			h.t.Fatalf("accrediting payload: %v", err)
		}
	}
	commit, err := h.proj.Commit(payUn)
	if err != nil {
		h.t.Fatalf("committing PayloadChange: %v", err)
	} else if verifyExp == verifySkip {
		// caller doesn't want verification at all.
		return commit
	}
	branch, err := h.proj.ReferenceToBranchName(plumbing.HEAD)
	if err != nil {
		h.t.Fatalf("determining checked out branch: %v", err)
	}
	shouldSucceed := verifyExp > 0
	err = h.proj.VerifyCommits(branch, []Commit{commit})
	if shouldSucceed && err != nil {
		h.t.Fatalf("verifying commit %q: %v", commit.Hash, err)
	} else if shouldSucceed {
		return commit
	} else if !shouldSucceed && err == nil {
		h.t.Fatalf("verifying commit %q should have failed", commit.Hash)
	}
	// verification failed as expected; undo the commit. A root commit (no
	// parents) resets to the zero hash.
	var parentHash plumbing.Hash
	if commit.Object.NumParents() > 0 {
		parentHash = commit.Object.ParentHashes[0]
	}
	h.reset(parentHash, git.HardReset)
	return commit
}
// assertCommitChange builds a change payload from the currently staged
// changes, commits it, and asserts the verification outcome per verifyExp.
func (h *harness) assertCommitChange(
	verifyExp verifyExpectation,
	msg string,
	sig sigcred.Signifier,
) Commit {
	payUn, err := h.proj.NewPayloadChange(msg)
	if err == nil {
		return h.tryCommit(verifyExp, payUn, sig)
	}
	h.t.Fatalf("creating PayloadChange: %v", err)
	return Commit{} // unreachable; Fatalf stops the test
}
// TestHasStagedChanges asserts that HasStagedChanges flips as files are
// staged, committed, deleted, and committed again.
func TestHasStagedChanges(t *testing.T) {
	h := newHarness(t)
	rootSig := h.stageNewAccount("root", false)

	assertHasStaged := func(expHasStaged bool) {
		hasStaged, err := h.proj.HasStagedChanges()
		if err != nil {
			t.Fatalf("error calling HasStagedChanges: %v", err)
		} else if hasStaged != expHasStaged {
			t.Fatalf("expected HasStagedChanges to return %v", expHasStaged)
		}
	}

	// the harness starts with some staged changes
	assertHasStaged(true)
	h.stage(map[string]string{"foo": "bar"})
	assertHasStaged(true)
	h.assertCommitChange(verifyShouldSucceed, "first commit", rootSig)
	assertHasStaged(false)

	h.stage(map[string]string{"foo": ""}) // delete foo
	assertHasStaged(true)
	h.assertCommitChange(verifyShouldSucceed, "second commit", rootSig)
	assertHasStaged(false)
}
// TestThisProjectStillVerifies opens this actual project and ensures that all
// commits in it still verify.
func TestThisProjectStillVerifies(t *testing.T) {
	proj, err := OpenProject(".")
	if err != nil {
		t.Fatalf("error opening repo: %v", err)
	}

	head, err := proj.GetHeadCommit()
	if err != nil {
		t.Fatalf("getting repo head: %v", err)
	}

	commits, err := proj.GetCommitRange(plumbing.ZeroHash, head.Hash)
	if err != nil {
		t.Fatalf("getting all commits (up to %q): %v", head.Hash, err)
	}

	branch, err := proj.ReferenceToBranchName(plumbing.HEAD)
	if err != nil {
		t.Fatalf("error determining checked out branch: %v", err)
	}

	if err := proj.VerifyCommits(branch, commits); err != nil {
		t.Fatal(err)
	}
}
// TestShortHashResolving asserts that a commit can be looked up by any
// unambiguous prefix of its full hash string.
func TestShortHashResolving(t *testing.T) {
	// TODO ideally this test would test that conflicting hashes are noticed,
	// but that's hard...
	h := newHarness(t)
	rootSig := h.stageNewAccount("root", false)
	hash := h.assertCommitChange(verifyShouldSucceed, "first commit", rootSig).Hash
	hashStr := hash.String()
	t.Log(hashStr)
	for i := 2; i < len(hashStr); i++ {
		gotCommit, err := h.proj.GetCommitByRevision(plumbing.Revision(hashStr[:i]))
		if err != nil {
			t.Fatalf("resolving %q: %v", hashStr[:i], err)
		} else if gotCommit.Hash != hash {
			// expected/actual were swapped in this message previously.
			t.Fatalf("expected hash %q but got %q",
				hash, gotCommit.Hash)
		}
	}
}

@ -1,104 +0,0 @@
// Package dehub TODO needs package docs
package dehub
import (
"dehub/fs"
"errors"
"fmt"
"path/filepath"
"gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-billy.v4/memfs"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"gopkg.in/src-d/go-git.v4/storage/memory"
)
const (
	// DehubDir defines the name of the directory where all dehub-related files are
	// expected to be found.
	DehubDir = ".dehub"
)

var (
	// ConfigPath defines the expected path to the Repo's configuration file.
	ConfigPath = filepath.Join(DehubDir, "config.yml")
)

// Repo is an object which allows accessing and modifying the dehub repo.
type Repo struct {
	// GitRepo is the underlying git repository housing the dehub repo.
	GitRepo *git.Repository
}
// OpenRepo opens the dehub repo in the given directory and returns the object
// for it.
//
// The given path is expected to have a git repo and .dehub folder already
// initialized.
func OpenRepo(path string) (*Repo, error) {
	gitRepo, err := git.PlainOpenWithOptions(path, &git.PlainOpenOptions{
		DetectDotGit: true,
	})
	if err != nil {
		return nil, fmt.Errorf("could not open git repo: %w", err)
	}
	return &Repo{GitRepo: gitRepo}, nil
}
// InitMemRepo initializes an empty repository which only exists in memory.
// Failure to do so is a programmer error and panics.
func InitMemRepo() *Repo {
	gitRepo, err := git.Init(memory.NewStorage(), memfs.New())
	if err != nil {
		panic(err)
	}
	return &Repo{GitRepo: gitRepo}
}
// billyFilesystem returns the billy.Filesystem backing the repo's worktree.
func (r *Repo) billyFilesystem() (billy.Filesystem, error) {
	w, err := r.GitRepo.Worktree()
	if err != nil {
		return nil, fmt.Errorf("could not open git worktree: %w", err)
	}
	return w.Filesystem, nil
}
// head resolves the repo's HEAD to its commit and tree objects. It returns an
// error (wrapping whatever the underlying git layer returned, e.g. a
// reference-not-found on an empty repo) if HEAD cannot be resolved.
func (r *Repo) head() (*object.Commit, *object.Tree, error) {
	head, err := r.GitRepo.Head()
	if err != nil {
		return nil, nil, fmt.Errorf("could not get repo HEAD: %w", err)
	}
	headHash := head.Hash()
	headCommit, err := r.GitRepo.CommitObject(headHash)
	if err != nil {
		return nil, nil, fmt.Errorf("could not get commit at HEAD (%q): %w", headHash, err)
	}
	headTree, err := r.GitRepo.TreeObject(headCommit.TreeHash)
	if err != nil {
		return nil, nil, fmt.Errorf("could not get tree object at HEAD (commit:%q tree:%q): %w",
			headHash, headCommit.TreeHash, err)
	}
	return headCommit, headTree, nil
}
// headOrRawFS returns an FS based on the HEAD commit, or if there is no HEAD
// commit (it's an empty repo) an FS based on the raw filesystem.
func (r *Repo) headOrRawFS() (fs.FS, error) {
	_, headTree, err := r.head()
	if errors.Is(err, plumbing.ErrReferenceNotFound) {
		// empty repo: fall back to the worktree's filesystem.
		bfs, err := r.billyFilesystem()
		if err != nil {
			return nil, fmt.Errorf("could not get underlying filesystem: %w", err)
		}
		return fs.FromBillyFilesystem(bfs), nil
	} else if err != nil {
		return nil, fmt.Errorf("could not get HEAD tree: %w", err)
	}
	return fs.FromTree(headTree), nil
}

@ -1,118 +0,0 @@
package dehub
import (
"bytes"
"dehub/accessctl"
"dehub/sigcred"
"io"
"math/rand"
"path/filepath"
"testing"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
yaml "gopkg.in/yaml.v2"
)
// harness bundles the state shared by the tests in this package: the testing
// handle, a deterministic random source, an in-memory repo, the config being
// built up, and the root account's signifier.
type harness struct {
	t    *testing.T
	rand *rand.Rand
	repo *Repo
	cfg  *Config
	sig  sigcred.SignifierInterface
}
// newHarness constructs a harness around a fresh in-memory repo, with a single
// "root" account (backed by a temporary PGP key) which is granted full access,
// and stages the corresponding config and public key files. The random source
// is seeded with a constant so tests are reproducible.
func newHarness(t *testing.T) *harness {
	rand := rand.New(rand.NewSource(0xb4eadb01))
	sig, pubKeyBody := sigcred.SignifierPGPTmp(rand)
	pubKeyPath := filepath.Join(DehubDir, "root.asc")
	cfg := &Config{
		Accounts: []Account{{
			ID: "root",
			Signifiers: []sigcred.Signifier{{PGPPublicKeyFile: &sigcred.SignifierPGPFile{
				Path: pubKeyPath,
			}}},
		}},
		// every file requires a signature from root.
		AccessControls: []accessctl.AccessControl{
			{
				Pattern: "**",
				Condition: accessctl.Condition{
					Signature: &accessctl.ConditionSignature{
						AccountIDs: []string{"root"},
						Count:      "100%",
					},
				},
			},
		},
	}
	cfgBody, err := yaml.Marshal(cfg)
	if err != nil {
		t.Fatal(err)
	}
	h := &harness{
		t:    t,
		rand: rand,
		repo: InitMemRepo(),
		cfg:  cfg,
		sig:  sig,
	}
	h.stage(map[string]string{
		ConfigPath: string(cfgBody),
		pubKeyPath: string(pubKeyBody),
	})
	return h
}
// stage writes the given path -> content mapping into the worktree and adds
// each path to the git index. An empty content string removes the path
// instead.
func (h *harness) stage(tree map[string]string) {
	w, err := h.repo.GitRepo.Worktree()
	if err != nil {
		h.t.Fatal(err)
	}
	fs := w.Filesystem
	for path, content := range tree {
		if content == "" {
			if _, err := w.Remove(path); err != nil {
				h.t.Fatalf("error removing %q: %v", path, err)
			}
			continue
		}
		dir := filepath.Dir(path)
		// NOTE(review): 0666 lacks the execute bit directories need to be
		// traversable on a real filesystem; memfs appears to ignore the mode,
		// but 0755 would be safer — confirm.
		if err := fs.MkdirAll(dir, 0666); err != nil {
			h.t.Fatalf("error making directory %q: %v", dir, err)
		}
		f, err := fs.Create(path)
		if err != nil {
			h.t.Fatalf("error creating file %q: %v", path, err)
		} else if _, err := io.Copy(f, bytes.NewBufferString(content)); err != nil {
			h.t.Fatalf("error writing to file %q: %v", path, err)
		} else if err := f.Close(); err != nil {
			h.t.Fatalf("error closing file %q: %v", path, err)
		} else if _, err := w.Add(path); err != nil {
			h.t.Fatalf("error adding file %q to index: %v", path, err)
		}
	}
}
// commit creates a git commit of all staged changes, authored by a dummy
// user, and returns the new commit's hash.
func (h *harness) commit(msg string) plumbing.Hash {
	w, err := h.repo.GitRepo.Worktree()
	if err != nil {
		h.t.Fatal(err)
	}
	opts := &git.CommitOptions{
		Author: &object.Signature{Name: "god"},
	}
	hash, err := w.Commit(msg, opts)
	if err != nil {
		h.t.Fatal(err)
	}
	return hash
}

@ -1,25 +1,77 @@
package sigcred
import "dehub/typeobj"
import (
"fmt"
// Credential represents a credential which has been attached to a commit which
// hopefully will allow it to be included in the master branch. Exactly one
// field tagged with "type" should be set.
type Credential struct {
"dehub.dev/src/dehub.git/typeobj"
)
// CredentialUnion represents a credential, signifying a user's approval of a
// payload. Exactly one field tagged with "type" should be set.
type CredentialUnion struct {
PGPSignature *CredentialPGPSignature `type:"pgp_signature"`
// AccountID specifies the account which generated this Credential. The
// Credentials produced by the Signifier.Sign method do not fill this field
// in.
AccountID string `yaml:"account"`
// AccountID specifies the account which generated this CredentialUnion.
//
// NOTE that credentials produced by the direct implementations of Signifier
// won't fill in this field, unless specifically documented. The Signifier
// produced by the Signifier() method of SignifierUnion _will_ fill this
// field in, however.
AccountID string `yaml:"account,omitempty"`
// AnonID specifies an identifier for the anonymous user which produced this
// credential. This field is mutually exclusive with AccountID, and won't be
// set by any Signifier implementation unless specifically documented.
AnonID string `yaml:"-"`
}
// MarshalYAML implements the yaml.Marshaler interface.
func (c Credential) MarshalYAML() (interface{}, error) {
func (c CredentialUnion) MarshalYAML() (interface{}, error) {
return typeobj.MarshalYAML(c)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *Credential) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *CredentialUnion) UnmarshalYAML(unmarshal func(interface{}) error) error {
return typeobj.UnmarshalYAML(c, unmarshal)
}
// ErrNotSelfVerifying is returned from the SelfVerify method of CredentialUnion
// when the credential does not implement the SelfVerifyingCredential interface.
// It may also be returned from the SelfVerify method of the
// SelfVerifyingCredential itself, if the credential can only self-verify under
// certain circumstances.
type ErrNotSelfVerifying struct {
	// Subject is a descriptor of the value which could not be verified. It may
	// be a type name or some other identifying piece of information.
	Subject string
}

// Error implements the error interface.
func (e ErrNotSelfVerifying) Error() string {
	return fmt.Sprintf("%s cannot verify itself", e.Subject)
}
// SelfVerify will attempt to cast the credential as a SelfVerifyingCredential,
// and returns the result of the SelfVerify method being called on it.
func (c CredentialUnion) SelfVerify(data []byte) error {
	el, _, err := typeobj.Element(c)
	if err != nil {
		return err
	}

	selfVerifyingCred, ok := el.(SelfVerifyingCredential)
	if !ok {
		return ErrNotSelfVerifying{Subject: fmt.Sprintf("credential of type %T", el)}
	}

	if err := selfVerifyingCred.SelfVerify(data); err != nil {
		return fmt.Errorf("self-verifying credential of type %T: %w", el, err)
	}
	return nil
}
// SelfVerifyingCredential is one which is able to prove its own authenticity by
// some means or another. It is not required for a Credential to implement this
// interface.
type SelfVerifyingCredential interface {
	// SelfVerify should return nil if the Credential has successfully verified
	// that it has accredited the given data, or an error describing why it
	// could not do so. It may return ErrNotSelfVerifying if the Credential can
	// only self-verify under certain circumstances, and those circumstances are
	// not met.
	SelfVerify(data []byte) error
}

@ -0,0 +1,58 @@
package sigcred
import (
"errors"
"math/rand"
"testing"
"time"
)
// TestSelfVerifyingCredentials asserts that a PGP credential self-verifies
// only when its public key body is embedded in the credential itself.
func TestSelfVerifyingCredentials(t *testing.T) {
	// the seed is logged so a failing run can be reproduced.
	seed := time.Now().UnixNano()
	t.Logf("seed: %d", seed)
	rand := rand.New(rand.NewSource(seed))

	tests := []struct {
		descr  string
		mkCred func(toSign []byte) (CredentialUnion, error)
		// expErr indicates the credential is expected NOT to self-verify.
		expErr bool
	}{
		{
			descr: "pgp sig no body",
			mkCred: func(toSign []byte) (CredentialUnion, error) {
				privKey, _ := TestSignifierPGP("", false, rand)
				return privKey.Sign(nil, toSign)
			},
			expErr: true,
		},
		{
			descr: "pgp sig with body",
			mkCred: func(toSign []byte) (CredentialUnion, error) {
				privKey, _ := TestSignifierPGP("", true, rand)
				return privKey.Sign(nil, toSign)
			},
		},
	}

	for _, test := range tests {
		t.Run(test.descr, func(t *testing.T) {
			// sign a random blob of up to 1KiB.
			data := make([]byte, rand.Intn(1024))
			if _, err := rand.Read(data); err != nil {
				t.Fatal(err)
			}
			cred, err := test.mkCred(data)
			if err != nil {
				t.Fatal(err)
			}
			err = cred.SelfVerify(data)
			isNotSelfVerifying := errors.As(err, new(ErrNotSelfVerifying))
			if test.expErr && !isNotSelfVerifying {
				t.Fatalf("expected ErrNotSelfVerifying but got: %v", err)
			} else if !test.expErr && err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
		})
	}
}

@ -3,19 +3,19 @@ package sigcred
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/sha256"
"dehub/fs"
"dehub/yamlutil"
"errors"
"fmt"
"io"
"io/ioutil"
"os/exec"
"path/filepath"
"strings"
"time"
"dehub.dev/src/dehub.git/fs"
"dehub.dev/src/dehub.git/yamlutil"
"golang.org/x/crypto/openpgp"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/packet"
)
@ -23,43 +23,78 @@ import (
// CredentialPGPSignature describes a PGP signature which has been used to sign
// a commit.
type CredentialPGPSignature struct {
PubKeyID string `yaml:"pub_key_id"`
Body yamlutil.Blob `yaml:"body"`
PubKeyID string `yaml:"pub_key_id"`
PubKeyBody string `yaml:"pub_key_body,omitempty"`
Body yamlutil.Blob `yaml:"body"`
}
// SelfVerify will only work if PubKeyBody is filled in. If so, Body will
// attempt to be verified by that public key.
func (c *CredentialPGPSignature) SelfVerify(data []byte) error {
	if c.PubKeyBody == "" {
		return ErrNotSelfVerifying{
			Subject: "PGP signature Credential with no pub_key_body field",
		}
	}
	// delegate verification to a Signifier built from the embedded public key.
	sig := SignifierPGP{Body: c.PubKeyBody}
	return sig.Verify(nil, data, CredentialUnion{PGPSignature: c})
}
type pgpPubKey struct {
pubKey *packet.PublicKey
type pgpKey struct {
entity *openpgp.Entity
}
func newPGPPubKey(r io.Reader) (pgpPubKey, error) {
func newPGPPubKey(r io.Reader) (pgpKey, error) {
// TODO support non-armored keys as well
block, err := armor.Decode(r)
if err != nil {
return pgpPubKey{}, fmt.Errorf("could not decode armored PGP public key: %w", err)
return pgpKey{}, fmt.Errorf("could not decode armored PGP public key: %w", err)
}
pkt, err := packet.Read(block.Body)
entity, err := openpgp.ReadEntity(packet.NewReader(block.Body))
if err != nil {
return pgpPubKey{}, fmt.Errorf("could not read PGP public key: %w", err)
return pgpKey{}, fmt.Errorf("could not read PGP public key: %w", err)
}
return pgpKey{entity: entity}, nil
}
pubKey, ok := pkt.(*packet.PublicKey)
if !ok {
return pgpPubKey{}, fmt.Errorf("packet is not a public key, it's a %T", pkt)
func (s pgpKey) Sign(_ fs.FS, data []byte) (CredentialUnion, error) {
if s.entity.PrivateKey == nil {
return CredentialUnion{}, errors.New("private key not loaded")
}
return pgpPubKey{pubKey: pubKey}, nil
h := sha256.New()
h.Write(data)
var sig packet.Signature
sig.Hash = crypto.SHA256
sig.PubKeyAlgo = s.entity.PrimaryKey.PubKeyAlgo
if err := sig.Sign(h, s.entity.PrivateKey, nil); err != nil {
return CredentialUnion{}, fmt.Errorf("signing data: %w", err)
}
body := new(bytes.Buffer)
if err := sig.Serialize(body); err != nil {
return CredentialUnion{}, fmt.Errorf("serializing signature: %w", err)
}
return CredentialUnion{
PGPSignature: &CredentialPGPSignature{
PubKeyID: s.entity.PrimaryKey.KeyIdString(),
Body: body.Bytes(),
},
}, nil
}
func (s pgpPubKey) Signed(_ fs.FS, cred Credential) (bool, error) {
func (s pgpKey) Signed(_ fs.FS, cred CredentialUnion) (bool, error) {
if cred.PGPSignature == nil {
return false, nil
}
return cred.PGPSignature.PubKeyID == s.pubKey.KeyIdString(), nil
return cred.PGPSignature.PubKeyID == s.entity.PrimaryKey.KeyIdString(), nil
}
func (s pgpPubKey) Verify(_ fs.FS, data []byte, cred Credential) error {
func (s pgpKey) Verify(_ fs.FS, data []byte, cred CredentialUnion) error {
credSig := cred.PGPSignature
if credSig == nil {
return fmt.Errorf("SignifierPGPFile cannot verify %+v", cred)
@ -80,200 +115,206 @@ func (s pgpPubKey) Verify(_ fs.FS, data []byte, cred Credential) error {
// package expects you to do it yourself.
h := sigPkt.Hash.New()
h.Write(data)
return s.pubKey.VerifySignature(h, sigPkt)
return s.entity.PrimaryKey.VerifySignature(h, sigPkt)
}
func (s pgpPubKey) encode() ([]byte, error) {
func (s pgpKey) MarshalBinary() ([]byte, error) {
body := new(bytes.Buffer)
armorEncoder, err := armor.Encode(body, "PGP PUBLIC KEY", nil)
if err != nil {
return nil, fmt.Errorf("error initializing armor encoder: %w", err)
} else if err := s.pubKey.Serialize(armorEncoder); err != nil {
return nil, fmt.Errorf("error encoding public key: %w", err)
return nil, fmt.Errorf("initializing armor encoder: %w", err)
} else if err := s.entity.Serialize(armorEncoder); err != nil {
return nil, fmt.Errorf("encoding public key: %w", err)
} else if err := armorEncoder.Close(); err != nil {
return nil, fmt.Errorf("error closing armor encoder: %w", err)
return nil, fmt.Errorf("closing armor encoder: %w", err)
}
return body.Bytes(), nil
}
func (s pgpPubKey) asSignfier() (SignifierPGP, error) {
body, err := s.encode()
if err != nil {
return SignifierPGP{}, err
func (s pgpKey) userID() (*packet.UserId, error) {
if l := len(s.entity.Identities); l == 0 {
return nil, errors.New("pgp key has no identity information")
} else if l > 1 {
return nil, errors.New("multiple identities on a single pgp key is unsupported")
}
return SignifierPGP{
Body: string(body),
}, nil
var identity *openpgp.Identity
for _, identity = range s.entity.Identities {
break
}
return identity.UserId, nil
}
type pgpPrivKey struct {
pgpPubKey
privKey *packet.PrivateKey
}
func anonPGPSignifier(pgpKey pgpKey, sig Signifier) (Signifier, error) {
keyID := pgpKey.entity.PrimaryKey.KeyIdString()
userID, err := pgpKey.userID()
if err != nil {
return nil, err
}
// SignifierPGPTmp returns a direct implementation of the SignifierInterface
// which uses a random private key generated in memory, as well as an armored
// version of its public key.
func SignifierPGPTmp(randReader io.Reader) (SignifierInterface, []byte) {
rawPrivKey, err := ecdsa.GenerateKey(elliptic.P521(), randReader)
pubKeyBody, err := pgpKey.MarshalBinary()
if err != nil {
panic(err)
return nil, err
}
privKeyRaw := packet.NewECDSAPrivateKey(time.Now(), rawPrivKey)
privKey := pgpPrivKey{
pgpPubKey: pgpPubKey{
pubKey: &privKeyRaw.PublicKey,
return signifierMiddleware{
Signifier: sig,
signCallback: func(cred *CredentialUnion) {
cred.PGPSignature.PubKeyBody = string(pubKeyBody)
cred.AnonID = fmt.Sprintf("%s %q", keyID, userID.Email)
},
privKey: privKeyRaw,
}
}, nil
}
pubKeyBody, err := privKey.pgpPubKey.encode()
// TestSignifierPGP returns a direct implementation of Signifier which uses a
// random private key generated in memory, as well as an armored version of its
// public key.
//
// NOTE that the key returned is very weak, and should only be used for tests.
func TestSignifierPGP(name string, anon bool, randReader io.Reader) (Signifier, []byte) {
entity, err := openpgp.NewEntity(name, "", name+"@example.com", &packet.Config{
Rand: randReader,
RSABits: 512,
})
if err != nil {
panic(err)
}
return privKey, pubKeyBody
}
func (s pgpPrivKey) Sign(_ fs.FS, data []byte) (Credential, error) {
h := sha256.New()
h.Write(data)
var sig packet.Signature
sig.Hash = crypto.SHA256
sig.PubKeyAlgo = s.pubKey.PubKeyAlgo
if err := sig.Sign(h, s.privKey, nil); err != nil {
return Credential{}, fmt.Errorf("failed to sign data: %w", err)
pgpKey := pgpKey{entity: entity}
pubKeyBody, err := pgpKey.MarshalBinary()
if err != nil {
panic(err)
}
body := new(bytes.Buffer)
if err := sig.Serialize(body); err != nil {
return Credential{}, fmt.Errorf("failed to serialize signature: %w", err)
if anon {
sigInt, err := anonPGPSignifier(pgpKey, pgpKey)
if err != nil {
panic(err)
}
return sigInt, pubKeyBody
}
return Credential{
PGPSignature: &CredentialPGPSignature{
PubKeyID: s.pubKey.KeyIdString(),
Body: body.Bytes(),
},
}, nil
return accountSignifier(name, pgpKey), pubKeyBody
}
// SignifierPGP describes a pgp public key whose corresponding private key will
// be used as a signing key.
// be used as a signing key. The public key can be described by one of multiple
// fields, each being a different method of loading the public key. Only one
// field should be set.
type SignifierPGP struct {
Body string `yaml:"body"`
}
var _ SignifierInterface = SignifierPGP{}
// An armored string encoding of the public key, as exported via
// `gpg -a --export <key-id>`
Body string `yaml:"body,omitempty"`
func (s SignifierPGP) load() (pgpPubKey, error) {
return newPGPPubKey(strings.NewReader(s.Body))
// Path, relative to the root of the repo, of the armored public key file.
Path string `yaml:"path,omitempty"`
}
// Sign will sign the given arbitrary bytes using the private key corresponding
// to the pgp public key embedded in this Signifier.
func (s SignifierPGP) Sign(fs fs.FS, data []byte) (Credential, error) {
sigPGP, err := s.load()
if err != nil {
return Credential{}, err
}
var _ Signifier = SignifierPGP{}
func cmdGPG(stdin []byte, args ...string) ([]byte, error) {
args = append([]string{"--openpgp"}, args...)
stderr := new(bytes.Buffer)
cmd := exec.Command("gpg",
"--openpgp",
"--detach-sign",
"--local-user", sigPGP.pubKey.KeyIdString())
cmd.Stdin = bytes.NewBuffer(data)
cmd := exec.Command("gpg", args...)
cmd.Stdin = bytes.NewBuffer(stdin)
cmd.Stderr = stderr
sig, err := cmd.Output()
out, err := cmd.Output()
if err != nil {
return Credential{}, fmt.Errorf("error signing with gpg (%v): %s", err, stderr.String())
return nil, fmt.Errorf("calling gpg command (%v): %s", err, stderr.String())
}
return Credential{
PGPSignature: &CredentialPGPSignature{
PubKeyID: sigPGP.pubKey.KeyIdString(),
Body: sig,
},
}, nil
return out, nil
}
// Signed returns true if the private key corresponding to the pgp public key
// embedded in this Signifier was used to produce the given Credential.
func (s SignifierPGP) Signed(fs fs.FS, cred Credential) (bool, error) {
sigPGP, err := s.load()
// LoadSignifierPGP loads a pgp key using the given identifier. The key is
// assumed to be stored in the client's keyring already.
//
// If this is being called for an anonymous user to use, then anon can be set to
// true. This will have the effect of setting the PubKeyBody and AnonID of all
// produced credentials.
func LoadSignifierPGP(keyID string, anon bool) (Signifier, error) {
pubKey, err := cmdGPG(nil, "-a", "--export", keyID)
if err != nil {
return false, err
return nil, fmt.Errorf("loading public key: %w", err)
} else if len(pubKey) == 0 {
return nil, fmt.Errorf("no public key found for %q", keyID)
}
return sigPGP.Signed(fs, cred)
}
sig := &SignifierPGP{Body: string(pubKey)}
if !anon {
return sig, nil
}
// Verify asserts that the given signature was produced by this key signing the
// given piece of data.
func (s SignifierPGP) Verify(fs fs.FS, data []byte, cred Credential) error {
sigPGP, err := s.load()
pgpKey, err := sig.load(nil)
if err != nil {
return err
return nil, err
}
return sigPGP.Verify(fs, data, cred)
return anonPGPSignifier(pgpKey, sig)
}
// SignifierPGPFile is the same as SignifierPGP, except that the public key is
// found in the repo rather than encoded into the object.
type SignifierPGPFile struct {
Path string `yaml:"path"`
}
var _ SignifierInterface = SignifierPGPFile{}
func (s SignifierPGP) load(fs fs.FS) (pgpKey, error) {
if s.Body != "" {
return newPGPPubKey(strings.NewReader(s.Body))
}
func (s SignifierPGPFile) load(fs fs.FS) (SignifierPGP, error) {
path := filepath.Clean(s.Path)
fr, err := fs.Open(path)
if err != nil {
return SignifierPGP{}, fmt.Errorf("could not open PGP public key file at %q: %w", path, err)
return pgpKey{}, fmt.Errorf("opening PGP public key file at %q: %w", path, err)
}
defer fr.Close()
pubKeyB, err := ioutil.ReadAll(fr)
if err != nil {
return SignifierPGP{}, fmt.Errorf("could not read PGP public key from file blob at %q: %w", s.Path, err)
return pgpKey{}, fmt.Errorf("reading PGP public key from file at %q: %w", s.Path, err)
}
return SignifierPGP{Body: string(pubKeyB)}, nil
return SignifierPGP{Body: string(pubKeyB)}.load(fs)
}
// Sign will sign the given arbitrary bytes using the private key corresponding
// to the pgp public key located by this Signifier.
func (s SignifierPGPFile) Sign(fs fs.FS, data []byte) (Credential, error) {
// to the pgp public key embedded in this Signifier.
func (s SignifierPGP) Sign(fs fs.FS, data []byte) (CredentialUnion, error) {
sigPGP, err := s.load(fs)
if err != nil {
return Credential{}, err
return CredentialUnion{}, err
}
return sigPGP.Sign(fs, data)
}
// Signed returns true if the private key corresponding to the pgp public key
// located by this Signifier was used to produce the given Credential.
func (s SignifierPGPFile) Signed(fs fs.FS, cred Credential) (bool, error) {
if cred.PGPSignature == nil {
return false, nil
keyID := sigPGP.entity.PrimaryKey.KeyIdString()
sig, err := cmdGPG(data, "--detach-sign", "--local-user", keyID)
if err != nil {
return CredentialUnion{}, fmt.Errorf("signing with pgp key: %w", err)
}
return CredentialUnion{
PGPSignature: &CredentialPGPSignature{
PubKeyID: keyID,
Body: sig,
},
}, nil
}
// Signed returns true if the private key corresponding to the pgp public key
// embedded in this Signifier was used to produce the given Credential.
func (s SignifierPGP) Signed(fs fs.FS, cred CredentialUnion) (bool, error) {
sigPGP, err := s.load(fs)
if err != nil {
return false, err
}
return sigPGP.Signed(fs, cred)
}
// Verify asserts that the given signature was produced by this key signing the
// given piece of data.
func (s SignifierPGPFile) Verify(fs fs.FS, data []byte, cred Credential) error {
func (s SignifierPGP) Verify(fs fs.FS, data []byte, cred CredentialUnion) error {
sigPGP, err := s.load(fs)
if err != nil {
return err
}
return sigPGP.Verify(fs, data, cred)
}
// SignifierPGPFile is deprecated and should not be used, use the Path field of
// SignifierPGP instead.
type SignifierPGPFile struct {
Path string `yaml:"path"`
}

@ -1,10 +1,11 @@
package sigcred
import (
"dehub/fs"
"math/rand"
"testing"
"time"
"dehub.dev/src/dehub.git/fs"
)
// There are not currently tests for testing pgp signature creation, as they
@ -14,21 +15,20 @@ import (
func TestPGPVerification(t *testing.T) {
tests := []struct {
descr string
init func(pubKeyBody []byte) (SignifierInterface, fs.FS)
init func(pubKeyBody []byte) (Signifier, fs.FS)
}{
{
descr: "SignifierPGP",
init: func(pubKeyBody []byte) (SignifierInterface, fs.FS) {
descr: "SignifierPGP Body",
init: func(pubKeyBody []byte) (Signifier, fs.FS) {
return SignifierPGP{Body: string(pubKeyBody)}, nil
},
},
{
descr: "SignifierPGPFile",
init: func(pubKeyBody []byte) (SignifierInterface, fs.FS) {
descr: "SignifierPGP Path",
init: func(pubKeyBody []byte) (Signifier, fs.FS) {
pubKeyPath := "some/dir/pubkey.asc"
fs := fs.Stub{pubKeyPath: pubKeyBody}
sigPGPFile := SignifierPGPFile{Path: pubKeyPath}
return sigPGPFile, fs
return SignifierPGP{Path: pubKeyPath}, fs
},
},
}
@ -38,7 +38,7 @@ func TestPGPVerification(t *testing.T) {
seed := time.Now().UnixNano()
t.Logf("seed: %d", seed)
rand := rand.New(rand.NewSource(seed))
privKey, pubKeyBody := SignifierPGPTmp(rand)
privKey, pubKeyBody := TestSignifierPGP("", false, rand)
sig, fs := test.init(pubKeyBody)
data := make([]byte, rand.Intn(1024))

@ -1,50 +1,95 @@
package sigcred
import (
"dehub/fs"
"dehub/typeobj"
"dehub.dev/src/dehub.git/fs"
"dehub.dev/src/dehub.git/typeobj"
)
// Signifier reprsents a single signing method being defined in the Config. Only
// one field should be set on each Signifier.
type Signifier struct {
PGPPublicKey *SignifierPGP `type:"pgp_public_key"`
PGPPublicKeyFile *SignifierPGPFile `type:"pgp_public_key_file"`
// Signifier describes the methods that all signifiers must implement.
type Signifier interface {
// Sign returns a credential containing a signature of the given data.
//
// tree can be used to find the Signifier at a particular snapshot.
Sign(fs.FS, []byte) (CredentialUnion, error)
// Signed returns true if the Signifier was used to sign the credential.
Signed(fs.FS, CredentialUnion) (bool, error)
// Verify asserts that the Signifier produced the given credential for the
// given data set, or returns an error.
//
// tree can be used to find the Signifier at a particular snapshot.
Verify(fs.FS, []byte, CredentialUnion) error
}
// SignifierUnion represents a single signifier for an account. Only one field
// should be set on each SignifierUnion.
type SignifierUnion struct {
PGPPublicKey *SignifierPGP `type:"pgp_public_key"`
// LegacyPGPPublicKeyFile is deprecated, only PGPPublicKey should be used
LegacyPGPPublicKeyFile *SignifierPGPFile `type:"pgp_public_key_file"`
}
// MarshalYAML implements the yaml.Marshaler interface.
func (s Signifier) MarshalYAML() (interface{}, error) {
func (s SignifierUnion) MarshalYAML() (interface{}, error) {
return typeobj.MarshalYAML(s)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (s *Signifier) UnmarshalYAML(unmarshal func(interface{}) error) error {
return typeobj.UnmarshalYAML(s, unmarshal)
func (s *SignifierUnion) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := typeobj.UnmarshalYAML(s, unmarshal); err != nil {
return err
}
// TODO deprecate PGPPublicKeyFile
if s.LegacyPGPPublicKeyFile != nil {
s.PGPPublicKey = &SignifierPGP{Path: s.LegacyPGPPublicKeyFile.Path}
s.LegacyPGPPublicKeyFile = nil
}
return nil
}
// Interface returns the SignifierInterface instance encapsulated by this
// Signifier object.
func (s Signifier) Interface() (SignifierInterface, error) {
// Signifier returns the Signifier instance encapsulated by this SignifierUnion.
//
// This will panic if no Signifier field is populated.
//
// accountID is given so as to automatically fill the AccountID field of
// credentials returned from Sign, since the underlying implementation doesn't
// know what account it's signing for.
func (s SignifierUnion) Signifier(accountID string) Signifier {
el, _, err := typeobj.Element(s)
if err != nil {
return nil, err
panic(err)
}
return el.(SignifierInterface), nil
return accountSignifier(accountID, el.(Signifier))
}
// SignifierInterface describes the methods that all Signifiers must implement.
type SignifierInterface interface {
// Sign returns a Credential containing a signature of the given data.
//
// tree can be used to find the Signifier at a particular snapshot.
Sign(fs fs.FS, data []byte) (Credential, error)
type signifierMiddleware struct {
Signifier
signCallback func(*CredentialUnion)
}
// Signed returns true if the Signifier was used to sign the Credential.
Signed(fs fs.FS, cred Credential) (bool, error)
func (sm signifierMiddleware) Sign(fs fs.FS, data []byte) (CredentialUnion, error) {
cred, err := sm.Signifier.Sign(fs, data)
if err != nil || sm.signCallback == nil {
return cred, err
}
sm.signCallback(&cred)
return cred, nil
}
// Verify asserts that the Signifier produced the given Credential for the
// given data set, or returns an error.
//
// tree can be used to find the Signifier at a particular snapshot.
Verify(fs fs.FS, data []byte, cred Credential) error
// accountSignifier wraps a Signifier to always set the accountID field on
// credentials it produces via the Sign method.
//
// TODO accountSignifier shouldn't be necessary, it's very ugly. It indicates
// that CredentialUnion probably shouldn't have AccountID on it, which makes
// sense. Some refactoring is required here.
func accountSignifier(accountID string, sig Signifier) Signifier {
return signifierMiddleware{
Signifier: sig,
signCallback: func(cred *CredentialUnion) {
cred.AccountID = accountID
},
}
}

@ -14,58 +14,121 @@ import (
"errors"
"fmt"
"reflect"
"strings"
)
type tagInfo struct {
val string
isDefault bool
}
func parseTag(tag string) tagInfo {
parts := strings.Split(tag, ",")
return tagInfo{
val: parts[0],
isDefault: len(parts) > 1 && parts[1] == "default",
}
}
// structTypeWithYAMLTags takes a type of kind struct and returns that same
// type, except all fields with a "type" tag will also have a `yaml:"-"` tag
// attached.
func structTypeWithYAMLTags(typ reflect.Type) (reflect.Type, error) {
n := typ.NumField()
outFields := make([]reflect.StructField, n)
for i := 0; i < n; i++ {
field := typ.Field(i)
hasTypeTag := field.Tag.Get("type") != ""
if hasTypeTag && field.Tag.Get("yaml") != "" {
return nil, fmt.Errorf("field %s has yaml tag and type tag", field.Name)
} else if hasTypeTag {
field.Tag += ` yaml:"-"`
}
outFields[i] = field
}
return reflect.StructOf(outFields), nil
}
func findTypeField(val reflect.Value, targetTypeTag string) (reflect.Value, reflect.StructField, error) {
typ := val.Type()
var defVal reflect.Value
var defTyp reflect.StructField
var defOk bool
for i := 0; i < val.NumField(); i++ {
fieldVal, fieldTyp := val.Field(i), typ.Field(i)
tagInfo := parseTag(fieldTyp.Tag.Get("type"))
if targetTypeTag != "" && tagInfo.val == targetTypeTag {
return fieldVal, fieldTyp, nil
} else if targetTypeTag == "" && tagInfo.isDefault {
defVal, defTyp, defOk = fieldVal, fieldTyp, true
}
}
if targetTypeTag == "" && defOk {
return defVal, defTyp, nil
} else if targetTypeTag == "" {
return reflect.Value{}, reflect.StructField{}, errors.New("type field not set")
}
return reflect.Value{}, reflect.StructField{}, fmt.Errorf("invalid type value %q", targetTypeTag)
}
// UnmarshalYAML is intended to be used within the UnmarshalYAML method of a
// union struct. It will use the given input data's "type" field and match that
// to the struct field tagged with that value. it will then unmarshal the input
// data into that inner field.
func UnmarshalYAML(i interface{}, unmarshal func(interface{}) error) error {
val := reflect.Indirect(reflect.ValueOf(i))
if !val.CanSet() {
return fmt.Errorf("cannot unmarshal into value of type %T", i)
if !val.CanSet() || val.Kind() != reflect.Struct {
return fmt.Errorf("cannot unmarshal into value of type %T: must be a struct pointer", i)
}
// create a copy of the struct type, with `yaml:"-"` tags added to all
// fields with `type:"..."` tags. If we didn't do this then there would be
// conflicts in the next step if a type field's name was the same as one of
// its inner field names.
valTypeCP, err := structTypeWithYAMLTags(val.Type())
if err != nil {
return fmt.Errorf("cannot unmarshal into value of type %T: %w", i, err)
}
// unmarshal in all non-typeobj fields. construct a type which wraps the
// given one, hiding its UnmarshalYAML method (if it has one), and unmarshal
// onto that directly. The "type" field is also unmarshaled at this stage.
valWrap := reflect.New(reflect.StructOf([]reflect.StructField{
reflect.StructField{
Name: "Type",
Type: typeOfString,
Tag: `yaml:"type"`,
},
{
Name: "Val",
Type: val.Type(),
Tag: `yaml:",inline"`,
},
{Name: "Type", Type: typeOfString, Tag: `yaml:"type"`},
{Name: "Val", Type: valTypeCP, Tag: `yaml:",inline"`},
}))
if err := unmarshal(valWrap.Interface()); err != nil {
return err
}
typeVal := valWrap.Elem().Field(0).String()
val.Set(valWrap.Elem().Field(1))
typ := val.Type()
for i := 0; i < val.NumField(); i++ {
fieldVal, fieldTyp := val.Field(i), typ.Field(i)
if fieldTyp.Tag.Get("type") != typeVal {
// set non-type fields into the original value
valWrapInnerVal := valWrap.Elem().Field(1)
for i := 0; i < valWrapInnerVal.NumField(); i++ {
fieldVal, fieldTyp := valWrapInnerVal.Field(i), valTypeCP.Field(i)
if fieldTyp.Tag.Get("type") != "" {
continue
}
val.Field(i).Set(fieldVal)
}
var valInto interface{}
if fieldVal.Kind() == reflect.Ptr {
newFieldVal := reflect.New(fieldTyp.Type.Elem())
fieldVal.Set(newFieldVal)
valInto = newFieldVal.Interface()
} else {
valInto = fieldVal.Addr().Interface()
}
return unmarshal(valInto)
typeVal := valWrap.Elem().Field(0).String()
fieldVal, fieldTyp, err := findTypeField(val, typeVal)
if err != nil {
return err
}
return fmt.Errorf("invalid type value %q", typeVal)
var valInto interface{}
if fieldVal.Kind() == reflect.Ptr {
newFieldVal := reflect.New(fieldTyp.Type.Elem())
fieldVal.Set(newFieldVal)
valInto = newFieldVal.Interface()
} else {
valInto = fieldVal.Addr().Interface()
}
return unmarshal(valInto)
}
// val should be of kind struct
@ -78,18 +141,18 @@ func element(val reflect.Value) (reflect.Value, string, []int, error) {
nonTypeFields := make([]int, 0, numFields)
for i := 0; i < numFields; i++ {
innerFieldVal := val.Field(i)
innerTypeTag := typ.Field(i).Tag.Get("type")
if innerTypeTag == "" {
innerTagInfo := parseTag(typ.Field(i).Tag.Get("type"))
if innerTagInfo.val == "" {
nonTypeFields = append(nonTypeFields, i)
} else if innerFieldVal.IsZero() {
continue
} else {
fieldVal = innerFieldVal
typeTag = innerTypeTag
typeTag = innerTagInfo.val
}
}
if fieldVal.IsZero() {
if !fieldVal.IsValid() {
return reflect.Value{}, "", nil, errors.New(`no non-zero fields tagged with "type"`)
}
return fieldVal, typeTag, nonTypeFields, nil

@ -2,6 +2,7 @@ package typeobj
import (
"reflect"
"strings"
"testing"
"github.com/davecgh/go-spew/spew"
@ -16,9 +17,15 @@ type bar struct {
B int `yaml:"b"`
}
// baz has a field of the same name as the type, which is tricky
type baz struct {
Baz int `yaml:"baz"`
}
type outer struct {
Foo foo `type:"foo"`
Bar *bar `type:"bar"`
Baz baz `type:"baz"`
Other string `yaml:"other_field,omitempty"`
}
@ -31,83 +38,131 @@ func (o *outer) UnmarshalYAML(unmarshal func(interface{}) error) error {
return UnmarshalYAML(o, unmarshal)
}
type outerWDefault struct {
Foo foo `type:"foo,default"`
Bar *bar `type:"bar"`
}
func (o outerWDefault) MarshalYAML() (interface{}, error) {
return MarshalYAML(o)
}
func (o *outerWDefault) UnmarshalYAML(unmarshal func(interface{}) error) error {
return UnmarshalYAML(o, unmarshal)
}
func TestTypeObj(t *testing.T) {
type test struct {
descr string
str string
err bool
other string
obj outer
typeTag string
elem interface{}
expErr string
expObj interface{}
expTypeTag string
expElem interface{}
expMarshalOut string // defaults to str
}
tests := []test{
{
descr: "no type set",
str: `{}`,
err: true,
descr: "no type set",
str: `{}`,
expErr: "type field not set",
expObj: outer{},
},
{
descr: "unknown type set",
str: "type: baz",
err: true,
descr: "no type set with nontype field",
str: `other_field: aaa`,
expErr: "type field not set",
expObj: outer{},
},
{
descr: "foo set",
str: "type: foo\na: 1\n",
obj: outer{Foo: foo{A: 1}},
typeTag: "foo",
elem: foo{A: 1},
descr: "no type set with default",
str: `a: 1`,
expObj: outerWDefault{Foo: foo{A: 1}},
expTypeTag: "foo",
expElem: foo{A: 1},
expMarshalOut: "type: foo\na: 1",
},
{
descr: "bar set",
str: "type: bar\nb: 1\n",
obj: outer{Bar: &bar{B: 1}},
typeTag: "bar",
elem: &bar{B: 1},
descr: "invalid type value",
str: "type: INVALID",
expErr: "invalid type value",
expObj: outer{},
},
{
descr: "foo and other_field set",
str: "type: foo\na: 1\nother_field: aaa\n",
obj: outer{Foo: foo{A: 1}, Other: "aaa"},
typeTag: "foo",
elem: foo{A: 1},
descr: "foo set",
str: "type: foo\na: 1",
expObj: outer{Foo: foo{A: 1}},
expTypeTag: "foo",
expElem: foo{A: 1},
},
{
descr: "bar set",
str: "type: bar\nb: 1",
expObj: outer{Bar: &bar{B: 1}},
expTypeTag: "bar",
expElem: &bar{B: 1},
},
{
descr: "foo and other_field set",
str: "type: foo\na: 1\nother_field: aaa",
expObj: outer{Foo: foo{A: 1}, Other: "aaa"},
expTypeTag: "foo",
expElem: foo{A: 1},
},
{
descr: "type is same as field name",
str: "type: baz\nbaz: 3",
expObj: outer{Baz: baz{Baz: 3}},
expTypeTag: "baz",
expElem: baz{Baz: 3},
},
}
for _, test := range tests {
t.Run(test.descr, func(t *testing.T) {
var o outer
err := yaml.Unmarshal([]byte(test.str), &o)
if test.err && err != nil {
intoV := reflect.New(reflect.TypeOf(test.expObj))
err := yaml.Unmarshal([]byte(test.str), intoV.Interface())
if test.expErr != "" {
if err == nil || !strings.HasPrefix(err.Error(), test.expErr) {
t.Fatalf("expected error %q when unmarshaling but got: %v", test.expErr, err)
}
return
} else if test.err && err == nil {
t.Fatal("expected error when unmarshaling but there was none")
} else if !test.err && err != nil {
} else if test.expErr == "" && err != nil {
t.Fatalf("unmarshaling %q returned unexpected error: %v", test.str, err)
}
if !reflect.DeepEqual(o, test.obj) {
t.Fatalf("test expected value:\n%s\nbut got value:\n%s", spew.Sprint(test.obj), spew.Sprint(o))
into := intoV.Elem().Interface()
if !reflect.DeepEqual(into, test.expObj) {
t.Fatalf("test expected value:\n%s\nbut got value:\n%s", spew.Sprint(test.expObj), spew.Sprint(into))
}
elem, typeTag, err := Element(o)
elem, typeTag, err := Element(into)
if err != nil {
t.Fatalf("error when calling Element(%s): %v", spew.Sprint(o), err)
} else if !reflect.DeepEqual(elem, test.elem) {
t.Fatalf("test expected elem value:\n%s\nbut got value:\n%s", spew.Sprint(test.elem), spew.Sprint(elem))
} else if typeTag != test.typeTag {
t.Fatalf("test expected typeTag value %q but got %q", test.typeTag, typeTag)
t.Fatalf("error when calling Element(%s): %v", spew.Sprint(into), err)
} else if !reflect.DeepEqual(elem, test.expElem) {
t.Fatalf("test expected elem value:\n%s\nbut got value:\n%s", spew.Sprint(test.expElem), spew.Sprint(elem))
} else if typeTag != test.expTypeTag {
t.Fatalf("test expected typeTag value %q but got %q", test.expTypeTag, typeTag)
}
b, err := yaml.Marshal(o)
expMarshalOut := test.expMarshalOut
if expMarshalOut == "" {
expMarshalOut = test.str
}
expMarshalOut = strings.TrimSpace(expMarshalOut)
b, err := yaml.Marshal(into)
if err != nil {
t.Fatalf("error marshaling %s: %v", spew.Sprint(o), err)
} else if test.str != string(b) {
t.Fatalf("test expected to marshal to %q, but instead marshaled to %q", test.str, b)
t.Fatalf("error marshaling %s: %v", spew.Sprint(into), err)
}
marshalOut := strings.TrimSpace(string(b))
if marshalOut != expMarshalOut {
t.Fatalf("test expected to marshal to %q, but instead marshaled to %q", expMarshalOut, marshalOut)
}
})
}

@ -10,6 +10,10 @@ import (
// string.
type Blob []byte
func (b Blob) String() string {
return base64.StdEncoding.EncodeToString([]byte(b))
}
// MarshalYAML implements the yaml.Marshaler interface.
func (b Blob) MarshalYAML() (interface{}, error) {
return base64.StdEncoding.EncodeToString([]byte(b)), nil

Loading…
Cancel
Save