Include ldap_sync as a cron job

Andrea Fazzi, 5 years ago
parent
commit 8d75aac170
85 changed files with 8633 additions and 325 deletions
  1. Godeps/Godeps.json (+41 -3)
  2. config/config.go (+4 -0)
  3. cron/sync/sync.go (+400 -0)
  4. main.go (+20 -1)
  5. vendor/github.com/gorilla/schema/README.md (+8 -8)
  6. vendor/github.com/gorilla/schema/cache.go (+22 -14)
  7. vendor/github.com/gorilla/schema/decoder.go (+61 -32)
  8. vendor/github.com/gorilla/schema/doc.go (+1 -1)
  9. vendor/github.com/gorilla/schema/encoder.go (+37 -3)
  10. vendor/github.com/remogatto/slicediff/README.md (+6 -0)
  11. vendor/github.com/remogatto/slicediff/slicediff.go (+41 -0)
  12. vendor/github.com/robfig/cron/.gitignore (+22 -0)
  13. vendor/github.com/robfig/cron/.travis.yml (+1 -0)
  14. vendor/github.com/robfig/cron/LICENSE (+21 -0)
  15. vendor/github.com/robfig/cron/README.md (+6 -0)
  16. vendor/github.com/robfig/cron/constantdelay.go (+27 -0)
  17. vendor/github.com/robfig/cron/cron.go (+259 -0)
  18. vendor/github.com/robfig/cron/doc.go (+129 -0)
  19. vendor/github.com/robfig/cron/parser.go (+380 -0)
  20. vendor/github.com/robfig/cron/spec.go (+158 -0)
  21. vendor/gogs.carducci-dante.gov.it/karmen/client/.gitignore (+1 -0)
  22. vendor/gogs.carducci-dante.gov.it/karmen/client/client.go (+475 -0)
  23. vendor/gogs.carducci-dante.gov.it/karmen/ldap/.gitignore (+1 -0)
  24. vendor/gogs.carducci-dante.gov.it/karmen/ldap/ldap.go (+370 -0)
  25. vendor/gogs.carducci-dante.gov.it/karmen/util/template/.gitignore (+2 -0)
  26. vendor/gogs.carducci-dante.gov.it/karmen/util/template/html.go (+29 -0)
  27. vendor/gogs.carducci-dante.gov.it/karmen/util/template/template.go (+29 -0)
  28. vendor/gopkg.in/asn1-ber.v1/.travis.yml (+18 -0)
  29. vendor/gopkg.in/asn1-ber.v1/LICENSE (+22 -0)
  30. vendor/gopkg.in/asn1-ber.v1/README.md (+24 -0)
  31. vendor/gopkg.in/asn1-ber.v1/ber.go (+504 -0)
  32. vendor/gopkg.in/asn1-ber.v1/content_int.go (+25 -0)
  33. vendor/gopkg.in/asn1-ber.v1/header.go (+29 -0)
  34. vendor/gopkg.in/asn1-ber.v1/identifier.go (+103 -0)
  35. vendor/gopkg.in/asn1-ber.v1/length.go (+81 -0)
  36. vendor/gopkg.in/asn1-ber.v1/util.go (+24 -0)
  37. vendor/gopkg.in/gomail.v2/.travis.yml (+9 -0)
  38. vendor/gopkg.in/gomail.v2/CHANGELOG.md (+20 -0)
  39. vendor/gopkg.in/gomail.v2/CONTRIBUTING.md (+20 -0)
  40. vendor/gopkg.in/gomail.v2/LICENSE (+20 -0)
  41. vendor/gopkg.in/gomail.v2/README.md (+92 -0)
  42. vendor/gopkg.in/gomail.v2/auth.go (+49 -0)
  43. vendor/gopkg.in/gomail.v2/doc.go (+5 -0)
  44. vendor/gopkg.in/gomail.v2/message.go (+322 -0)
  45. vendor/gopkg.in/gomail.v2/mime.go (+21 -0)
  46. vendor/gopkg.in/gomail.v2/mime_go14.go (+25 -0)
  47. vendor/gopkg.in/gomail.v2/send.go (+116 -0)
  48. vendor/gopkg.in/gomail.v2/smtp.go (+202 -0)
  49. vendor/gopkg.in/gomail.v2/writeto.go (+306 -0)
  50. vendor/gopkg.in/ldap.v2/.gitignore (+0 -0)
  51. vendor/gopkg.in/ldap.v2/.travis.yml (+29 -0)
  52. vendor/gopkg.in/ldap.v2/LICENSE (+22 -0)
  53. vendor/gopkg.in/ldap.v2/Makefile (+42 -0)
  54. vendor/gopkg.in/ldap.v2/README.md (+53 -0)
  55. vendor/gopkg.in/ldap.v2/add.go (+113 -0)
  56. vendor/gopkg.in/ldap.v2/bind.go (+143 -0)
  57. vendor/gopkg.in/ldap.v2/client.go (+27 -0)
  58. vendor/gopkg.in/ldap.v2/compare.go (+85 -0)
  59. vendor/gopkg.in/ldap.v2/conn.go (+467 -0)
  60. vendor/gopkg.in/ldap.v2/control.go (+420 -0)
  61. vendor/gopkg.in/ldap.v2/debug.go (+24 -0)
  62. vendor/gopkg.in/ldap.v2/del.go (+84 -0)
  63. vendor/gopkg.in/ldap.v2/dn.go (+244 -0)
  64. vendor/gopkg.in/ldap.v2/doc.go (+4 -0)
  65. vendor/gopkg.in/ldap.v2/error.go (+148 -0)
  66. vendor/gopkg.in/ldap.v2/filter.go (+466 -0)
  67. vendor/gopkg.in/ldap.v2/ldap.go (+320 -0)
  68. vendor/gopkg.in/ldap.v2/modify.go (+170 -0)
  69. vendor/gopkg.in/ldap.v2/passwdmodify.go (+148 -0)
  70. vendor/gopkg.in/ldap.v2/search.go (+450 -0)
  71. vendor/gopkg.in/yaml.v2/.travis.yml (+3 -0)
  72. vendor/gopkg.in/yaml.v2/NOTICE (+13 -0)
  73. vendor/gopkg.in/yaml.v2/README.md (+2 -2)
  74. vendor/gopkg.in/yaml.v2/apic.go (+26 -29)
  75. vendor/gopkg.in/yaml.v2/decode.go (+164 -74)
  76. vendor/gopkg.in/yaml.v2/emitterc.go (+6 -5)
  77. vendor/gopkg.in/yaml.v2/encode.go (+124 -40)
  78. vendor/gopkg.in/yaml.v2/go.mod (+5 -0)
  79. vendor/gopkg.in/yaml.v2/readerc.go (+19 -1)
  80. vendor/gopkg.in/yaml.v2/resolve.go (+65 -15)
  81. vendor/gopkg.in/yaml.v2/scannerc.go (+7 -22)
  82. vendor/gopkg.in/yaml.v2/sorter.go (+9 -0)
  83. vendor/gopkg.in/yaml.v2/writerc.go (+1 -64)
  84. vendor/gopkg.in/yaml.v2/yaml.go (+116 -7)
  85. vendor/gopkg.in/yaml.v2/yamlh.go (+26 -4)

+ 41 - 3
Godeps/Godeps.json

@@ -1,6 +1,6 @@
 {
 	"ImportPath": "gogs.carducci-dante.gov.it/karmen/core",
-	"GoVersion": "go1.9",
+	"GoVersion": "go1.12",
 	"GodepVersion": "v79",
 	"Deps": [
 		{
@@ -38,7 +38,8 @@
 		},
 		{
 			"ImportPath": "github.com/gorilla/schema",
-			"Rev": "e6c82218a8b3ed3cbeb5407429849c0b0b597d40"
+			"Comment": "v1.0.2-3-gda8e735",
+			"Rev": "da8e73546beca346d03fd40d5b146e06e6b98b7a"
 		},
 		{
 			"ImportPath": "github.com/gorilla/securecookie",
@@ -64,14 +65,51 @@
 			"ImportPath": "github.com/jinzhu/inflection",
 			"Rev": "1c35d901db3da928c72a72d8458480cc9ade058f"
 		},
+		{
+			"ImportPath": "github.com/remogatto/slicediff",
+			"Rev": "5db6595c4621647a2de9e82b7b1eccdb13b7aff3"
+		},
+		{
+			"ImportPath": "github.com/robfig/cron",
+			"Comment": "v1.1",
+			"Rev": "b41be1df696709bb6395fe435af20370037c0b4c"
+		},
 		{
 			"ImportPath": "github.com/sethvargo/go-password/password",
 			"Comment": "v0.1.1-2-g669be2f",
 			"Rev": "669be2fd9899edfa540cb081008de6d098461541"
 		},
+		{
+			"ImportPath": "gogs.carducci-dante.gov.it/karmen/client",
+			"Rev": "18e148968439edd35c33d117263e938cf03fb2a1"
+		},
+		{
+			"ImportPath": "gogs.carducci-dante.gov.it/karmen/ldap",
+			"Rev": "135235df9074fbeff395a0e0dc3e8656160e2c6a"
+		},
+		{
+			"ImportPath": "gogs.carducci-dante.gov.it/karmen/util/template",
+			"Rev": "8dfeb3928544bfba6c19725bd78c186e54ff6b32"
+		},
+		{
+			"ImportPath": "gopkg.in/asn1-ber.v1",
+			"Comment": "v1.2",
+			"Rev": "379148ca0225df7a432012b8df0355c2a2063ac0"
+		},
+		{
+			"ImportPath": "gopkg.in/gomail.v2",
+			"Comment": "2.0.0-23-g81ebce5",
+			"Rev": "81ebce5c23dfd25c6c67194b37d3dd3f338c98b1"
+		},
+		{
+			"ImportPath": "gopkg.in/ldap.v2",
+			"Comment": "v2.5.0",
+			"Rev": "8168ee085ee43257585e50c6441aadf54ecb2c9f"
+		},
 		{
 			"ImportPath": "gopkg.in/yaml.v2",
-			"Rev": "287cf08546ab5e7e37d55a84f7ed3fd1db036de5"
+			"Comment": "v2.2.2",
+			"Rev": "51d6538a90f86fe93ac480b35f37b2be17fef232"
 		}
 	]
 }

+ 4 - 0
config/config.go

@@ -74,6 +74,10 @@ type ConfigT struct {
 		From     string
 		Cc       string
 	}
+
+	Sync struct {
+		SafeRun bool `yaml:"safe_run"`
+	}
 }
 
 var Config *ConfigT
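
The new `Sync` block adds a `safe_run` flag that the sync job checks before touching LDAP or sending mail. A minimal sketch, not part of the commit, of how such a flag decodes from YAML with the vendored `gopkg.in/yaml.v2`; the trimmed-down `configT` struct and the config fragment below are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

// configT mirrors only the new Sync section of karmen's ConfigT;
// the rest of the real struct is omitted here.
type configT struct {
	Sync struct {
		SafeRun bool `yaml:"safe_run"`
	}
}

func main() {
	// Hypothetical fragment of a karmen config file enabling safe-run mode.
	data := []byte("sync:\n  safe_run: true\n")

	var cfg configT
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Sync.SafeRun) // true
}
```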

+ 400 - 0
cron/sync/sync.go

@@ -0,0 +1,400 @@
+package sync
+
+import (
+	"bytes"
+	"crypto/tls"
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/remogatto/slicediff"
+	"github.com/sethvargo/go-password/password"
+
+	gomail "gopkg.in/gomail.v2"
+	ldap "gopkg.in/ldap.v2"
+
+	"text/template"
+
+	karmen_client "gogs.carducci-dante.gov.it/karmen/client"
+	"gogs.carducci-dante.gov.it/karmen/core/config"
+	"gogs.carducci-dante.gov.it/karmen/core/orm"
+	karmen_ldap "gogs.carducci-dante.gov.it/karmen/ldap"
+
+	tpl_util "gogs.carducci-dante.gov.it/karmen/util/template"
+)
+
+type TeachersDsts []*ldap.Entry
+type TeachersSrcs []*orm.Teacher
+
+type TeachersMLSrcs []*orm.Teacher
+
+func (d TeachersDsts) Strings() (result []string) {
+	for _, entry := range d {
+		result = append(
+			result,
+			entry.DN,
+		)
+	}
+	return
+}
+
+func (s TeachersSrcs) Strings() (result []string) {
+	for _, teacher := range s {
+		result = append(
+			result,
+			fmt.Sprintf("cn=%s %s,%s", teacher.Name, teacher.Surname, "ou=Docenti,ou=Persone,dc=carducci-dante,dc=gov,dc=it"),
+		)
+	}
+	return
+}
+
+type TeachersGroupDsts []string
+type TeachersGroupSrcs []*orm.Teacher
+
+func (d TeachersGroupDsts) Strings() (result []string) {
+	return d
+}
+
+func (s TeachersGroupSrcs) Strings() (result []string) {
+	for _, teacher := range s {
+		result = append(
+			result,
+			teacher.Username(),
+		)
+	}
+	return
+}
+
+type TeachersMLDsts []*orm.Teacher
+
+func (s TeachersMLDsts) Strings() (result []string) {
+	for _, teacher := range s {
+		result = append(
+			result,
+			fmt.Sprintf("cn=%s %s,%s", teacher.Name, teacher.Surname, "ou=Docenti,ou=Persone,dc=carducci-dante,dc=gov,dc=it"),
+		)
+	}
+	return
+}
+
+type SyncJob struct {
+	conf *config.ConfigT
+}
+
+func NewSyncJob(conf *config.ConfigT) *SyncJob {
+	return &SyncJob{conf}
+}
+
+func (syncJob *SyncJob) sendMail(teacher *orm.Teacher, tpl *template.Template) error {
+	var buf bytes.Buffer
+
+	err := tpl.Execute(&buf, teacher)
+	if err != nil {
+		return err
+	}
+
+	if !syncJob.conf.Sync.SafeRun {
+		log.Printf("Invio le credenziali a %s", teacher.AltEmail)
+		m := gomail.NewMessage()
+		m.SetHeader("From", syncJob.conf.Smtp.From)
+		m.SetHeader("To", teacher.AltEmail)
+		m.SetHeader("Cc", syncJob.conf.Smtp.Cc)
+		m.SetHeader("Subject", fmt.Sprintf("Attivazione dell'utenza %s per l'accesso ai servizi informatici del Liceo \"Carducci-Dante\" di Trieste", teacher.CompleteName()))
+		m.SetBody("text/plain", buf.String())
+
+		d := gomail.NewDialer(syncJob.conf.Smtp.Host, syncJob.conf.Smtp.Port, syncJob.conf.Smtp.Username, syncJob.conf.Smtp.Password)
+		d.TLSConfig = &tls.Config{InsecureSkipVerify: true}
+
+		if err := d.DialAndSend(m); err != nil {
+			return err
+		}
+	} else {
+		log.Printf("Invierei le credenziali a %s, la nuova password è %s", teacher.AltEmail, teacher.PlainPassword)
+	}
+
+	return nil
+}
+
+func (syncJob *SyncJob) syncTeachers(ldapClient *karmen_ldap.Client, karmenClient *karmen_client.Client, entries []*ldap.Entry, teachers []*orm.Teacher) error {
+
+	mailTpl, err := tpl_util.LoadTextTemplate("mail.tpl")
+	if err != nil {
+		log.Println(err)
+	}
+
+	actions := slicediff.Diff(TeachersDsts(entries), TeachersSrcs(teachers))
+
+	toBeAdded := make([]*orm.Teacher, 0)
+	toBeRemoved := make([]string, 0)
+	toBeUpdated := make([]*orm.Teacher, 0)
+
+	for _, a := range actions {
+		switch a.Type {
+		case slicediff.Remove:
+			toBeRemoved = append(toBeRemoved, entries[a.Id].DN)
+
+		case slicediff.Add:
+			teacher := teachers[a.Id]
+			if !teacher.Exclude {
+				toBeAdded = append(toBeAdded, teacher)
+			}
+
+		case slicediff.Update:
+			teacher := teachers[a.Id]
+			if !teacher.Exclude && teacher.Regenerate {
+				toBeUpdated = append(toBeUpdated, teacher)
+			}
+
+		}
+	}
+
+	// Add
+
+	if !syncJob.conf.Sync.SafeRun {
+		log.Printf("Aggiungo %d utenze", len(toBeAdded))
+		for _, teacher := range toBeAdded {
+			password, err := password.Generate(8, 2, 0, false, true)
+			if err != nil {
+				return err
+			}
+
+			teacher.PlainPassword = password
+
+			err = ldapClient.AddTeacher(teacher)
+			if err != nil {
+				return err
+			}
+
+			log.Printf("Ho aggiunto il docente %s", teacher.CompleteName())
+
+			if teacher.Regenerate {
+				err = syncJob.sendMail(teacher, mailTpl)
+				if err != nil {
+					return err
+				}
+
+				teacher.Regenerate = false
+
+				err = karmenClient.UpdateTeacher(teacher)
+				if err != nil {
+					return err
+				}
+
+			}
+
+		}
+	} else {
+		if len(toBeAdded) == 0 {
+			log.Println("Non verranno aggiunte utenze")
+		} else {
+			log.Printf("Verranno aggiunte %d utenze", len(toBeAdded))
+			for _, teacher := range toBeAdded {
+				log.Println(teacher.CompleteName())
+				if teacher.Regenerate {
+					password, err := password.Generate(8, 2, 0, false, true)
+					if err != nil {
+						return err
+					}
+
+					teacher.PlainPassword = password
+					err = syncJob.sendMail(teacher, mailTpl)
+					if err != nil {
+						return err
+					}
+				}
+
+			}
+		}
+	}
+
+	// Remove
+
+	if !syncJob.conf.Sync.SafeRun {
+		log.Printf("Rimuovo %d utenze", len(toBeRemoved))
+		for _, entry := range toBeRemoved {
+			err := ldapClient.DeleteByDN(entry)
+			if err != nil {
+				return err
+			}
+			log.Printf("Rimuovo %s", entry)
+		}
+	} else {
+		log.Printf("Verranno rimosse %d utenze", len(toBeRemoved))
+		for _, entry := range toBeRemoved {
+			log.Println(entry)
+		}
+	}
+
+	// Update
+
+	if !syncJob.conf.Sync.SafeRun {
+		log.Printf("Aggiorno %d utenze", len(toBeUpdated))
+		for _, teacher := range toBeUpdated {
+			if teacher.Regenerate {
+				password, err := password.Generate(8, 2, 0, false, true)
+				if err != nil {
+					return err
+				}
+
+				teacher.PlainPassword = password
+				err = syncJob.sendMail(teacher, mailTpl)
+				if err != nil {
+					return err
+				}
+
+				teacher.Regenerate = false
+
+				err = karmenClient.UpdateTeacher(teacher)
+				if err != nil {
+					return err
+				}
+				err = ldapClient.UpdateTeacherPassword(teacher, teacher.PlainPassword)
+				if err != nil {
+					return err
+
+				}
+			}
+		}
+
+	} else {
+		log.Printf("Verranno aggiornate %d utenze", len(toBeUpdated))
+		for _, teacher := range toBeUpdated {
+			log.Println(teacher.CompleteName())
+		}
+	}
+
+	return nil
+}
+
+func (syncJob *SyncJob) syncGroup(ldapClient *karmen_ldap.Client, groupDN string, teachers []*orm.Teacher) error {
+	actions := make(map[string]*slicediff.Action)
+	entries, err := ldapClient.GroupMembers(groupDN)
+	if err != nil {
+		return err
+	}
+
+	values := entries[0].Attributes[0].Values
+
+	if strings.Contains(groupDN, "Mailing Lists") {
+		actions = slicediff.Diff(TeachersGroupDsts(values), TeachersMLDsts(teachers))
+	} else {
+		actions = slicediff.Diff(TeachersGroupDsts(values), TeachersGroupSrcs(teachers))
+	}
+
+	toBeAdded := make([]*orm.Teacher, 0)
+	toBeRemoved := make([]string, 0)
+
+	for _, a := range actions {
+		switch a.Type {
+		case slicediff.Remove:
+			toBeRemoved = append(toBeRemoved, values[a.Id])
+
+		case slicediff.Add:
+			teacher := teachers[a.Id]
+			if !teacher.Exclude {
+				toBeAdded = append(toBeAdded, teacher)
+			}
+
+		}
+	}
+
+	if !syncJob.conf.Sync.SafeRun {
+		log.Printf("Aggiungo %d docenti al gruppo %s", len(toBeAdded), groupDN)
+		for _, teacher := range toBeAdded {
+			log.Println(teacher.CompleteName())
+			err = ldapClient.AddTeacherToGroup(teacher, groupDN)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		if len(toBeAdded) == 0 {
+			log.Printf("Non verranno aggiunti docenti al gruppo %s", groupDN)
+		} else {
+			log.Printf("Verranno aggiunti %d docenti al gruppo %s", len(toBeAdded), groupDN)
+			for _, teacher := range toBeAdded {
+				log.Println(teacher.CompleteName())
+			}
+		}
+	}
+
+	if !syncJob.conf.Sync.SafeRun {
+		log.Printf("Rimuovo %d docenti dal gruppo %s", len(toBeRemoved), groupDN)
+		for _, value := range toBeRemoved {
+			log.Printf("RIMUOVO il docente %s dal gruppo %s", value, groupDN)
+			ldapClient.RemoveTeacherFromGroupByMemberValue(value, groupDN)
+		}
+	} else {
+		log.Printf("Verranno rimossi %d docenti dal gruppo %s", len(toBeRemoved), groupDN)
+		for _, entry := range toBeRemoved {
+			log.Println(entry)
+		}
+	}
+
+	return nil
+}
+
+func (syncJob *SyncJob) Run() {
+	if syncJob.conf.Sync.SafeRun {
+		log.Println("Sto girando in modalità SICURA!")
+	}
+
+	log.Printf("Mi connetto a karmen %s...", syncJob.conf.Url)
+	karmenClient, err := karmen_client.Dial(syncJob.conf.Url, syncJob.conf.Admin.Username, syncJob.conf.Admin.Password)
+	if err != nil {
+		log.Println(err)
+	}
+
+	teachers, err := karmenClient.GetTeachers()
+	if err != nil {
+		log.Println(err)
+	}
+
+	log.Printf("Mi connetto alla directory LDAP %s...", syncJob.conf.Ldap.Host)
+	ldapClient, err := karmen_ldap.NewClient(syncJob.conf.Ldap.Host, syncJob.conf)
+	if err != nil {
+		log.Println(err)
+	}
+
+	log.Println("Recupero i docenti su LDAP...")
+	entries, err := ldapClient.Teachers()
+	if err != nil {
+		log.Println(err)
+	}
+
+	log.Println("Recupero i dipartimenti...")
+	departments, err := karmenClient.GetDepartments()
+	if err != nil {
+		log.Println(err)
+	}
+
+	log.Println("Sincronizzo i docenti su LDAP...")
+	if err := syncJob.syncTeachers(ldapClient, karmenClient, entries, teachers); err != nil {
+		panic(err)
+	}
+
+	log.Println("Sincronizzo il gruppo 'Docenti'...")
+	if err := syncJob.syncGroup(ldapClient, "cn=Docenti", teachers); err != nil {
+		panic(err)
+	}
+
+	log.Println("Sincronizzo la ML 'Tutti i docenti'...")
+	if err := syncJob.syncGroup(ldapClient, "cn=Tutti i docenti,ou=Mailing Lists", teachers); err != nil {
+		panic(err)
+	}
+
+	log.Println("Sincronizzo gruppi e ML dei dipartimenti su LDAP...")
+	for _, department := range departments {
+		group := fmt.Sprintf("cn=%s,ou=Mailing Lists", department.Name)
+		err = syncJob.syncGroup(ldapClient, group, department.Teachers)
+		if err != nil {
+			panic(err)
+		}
+		group = fmt.Sprintf("cn=%s,ou=Dipartimenti", department.Name)
+		err = syncJob.syncGroup(ldapClient, group, department.Teachers)
+		if err != nil {
+			panic(err)
+		}
+
+	}
+}
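
The add and update branches above share one flow: generate a password with `go-password`, render `mail.tpl` with `text/template`, then either send the message via gomail or, when `SafeRun` is set, only log what would happen. A condensed sketch of that flow; the template text, addresses, and SMTP settings below are placeholders, not values from the commit.

```go
package main

import (
	"bytes"
	"log"
	"text/template"

	"github.com/sethvargo/go-password/password"
	gomail "gopkg.in/gomail.v2"
)

func main() {
	safeRun := true // mirrors conf.Sync.SafeRun

	// Same parameters the sync job uses: 8 chars, 2 digits, no symbols.
	pass, err := password.Generate(8, 2, 0, false, true)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder mail template; the real one is loaded from mail.tpl.
	tpl := template.Must(template.New("mail").Parse("Your new password is {{.}}\n"))
	var body bytes.Buffer
	if err := tpl.Execute(&body, pass); err != nil {
		log.Fatal(err)
	}

	if safeRun {
		// Safe-run mode: log instead of sending.
		log.Printf("safe run: would send:\n%s", body.String())
		return
	}

	m := gomail.NewMessage()
	m.SetHeader("From", "noreply@example.org")
	m.SetHeader("To", "teacher@example.org")
	m.SetHeader("Subject", "Account activation")
	m.SetBody("text/plain", body.String())

	d := gomail.NewDialer("smtp.example.org", 587, "user", "secret")
	if err := d.DialAndSend(m); err != nil {
		log.Fatal(err)
	}
}
```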

+ 20 - 1
main.go

@@ -9,15 +9,17 @@ import (
 
 	"github.com/gorilla/handlers"
 	"github.com/jinzhu/gorm"
+	"github.com/robfig/cron"
 
 	"gogs.carducci-dante.gov.it/karmen/core/config"
+	"gogs.carducci-dante.gov.it/karmen/core/cron/sync"
 	karmen_handlers "gogs.carducci-dante.gov.it/karmen/core/handlers"
 	"gogs.carducci-dante.gov.it/karmen/core/orm"
 )
 
 const (
 	MaxNumRetries     = 20
-	RetryTimeInterval = 2
+	RetryTimeInterval = 5
 )
 
 func main() {
@@ -33,6 +35,8 @@ func main() {
 		panic(err)
 	}
 
+	log.Println(config.Config)
+
 	log.Println("Starting karmen and waiting for the DB...")
 
 	count := MaxNumRetries
@@ -57,8 +61,23 @@ func main() {
 		orm.AutoMigrate()
 	}
 
+	log.Println("Starting cron jobs...")
+
+	c := cron.New()
+
+	syncJob := sync.NewSyncJob(config.Config)
+
+	c.AddJob("@every 1m", syncJob)
+
+	c.Start()
+
+	for _, entry := range c.Entries() {
+		log.Println(entry)
+	}
+
 	log.Println("karmen is listening to port 3000...")
 	if err := http.ListenAndServe(":3000", handlers.LoggingHandler(os.Stdout, karmen_handlers.Handlers())); err != nil {
 		panic(err)
 	}
+
 }
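
`main.go` registers the sync job with `robfig/cron` via `AddJob("@every 1m", syncJob)`. A self-contained sketch of the same wiring with a stand-in job; the `helloJob` type is illustrative, any type with a `Run()` method satisfies `cron.Job`.

```go
package main

import (
	"log"
	"time"

	"github.com/robfig/cron"
)

// helloJob stands in for sync.SyncJob: it only needs a Run method.
type helloJob struct{}

func (helloJob) Run() { log.Println("hello from the cron job") }

func main() {
	c := cron.New()
	if err := c.AddJob("@every 1m", helloJob{}); err != nil {
		log.Fatal(err)
	}
	c.Start()
	defer c.Stop() // stops the scheduler; running jobs are not interrupted

	// Entries() returns a snapshot with the next activation times.
	for _, e := range c.Entries() {
		log.Printf("next run: %v", e.Next)
	}

	time.Sleep(2 * time.Minute) // keep the process alive long enough to fire
}
```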

+ 8 - 8
vendor/github.com/gorilla/schema/README.md

@@ -11,7 +11,7 @@ Package gorilla/schema converts structs to and from form values.
 Here's a quick example: we parse POST form values and then decode them into a struct:
 
 ```go
-// Set a Decoder instance as a package global, because it caches 
+// Set a Decoder instance as a package global, because it caches
 // meta-data about structs, and an instance can be shared safely.
 var decoder = schema.NewDecoder()
 
@@ -27,9 +27,9 @@ func MyHandler(w http.ResponseWriter, r *http.Request) {
     }
 
     var person Person
-    
+
     // r.PostForm is a map of our POST form values
-    err := decoder.Decode(&person, r.PostForm)
+    err = decoder.Decode(&person, r.PostForm)
     if err != nil {
         // Handle error
     }
@@ -64,9 +64,9 @@ To define custom names for fields, use a struct tag "schema". To not populate ce
 
 ```go
 type Person struct {
-    Name  string `schema:"name"`  // custom name
-    Phone string `schema:"phone"` // custom name
-    Admin bool   `schema:"-"`     // this field is never set
+    Name  string `schema:"name,required"`  // custom name, must be supplied
+    Phone string `schema:"phone"`          // custom name
+    Admin bool   `schema:"-"`              // this field is never set
 }
 ```
 
@@ -83,8 +83,8 @@ The supported field types in the struct are:
 
 Unsupported types are simply ignored, however custom types can be registered to be converted.
 
-More examples are available on the Gorilla website: http://www.gorillatoolkit.org/pkg/schema
+More examples are available on the Gorilla website: https://www.gorillatoolkit.org/pkg/schema
 
-## License 
+## License
 
 BSD licensed. See the LICENSE file for details.
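
The updated README example marks `Name` as `required`. A small standalone sketch of what that tag does: decoding fails when the required key is absent. The exact error text comes from the library and is not asserted here.

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/gorilla/schema"
)

type Person struct {
	Name  string `schema:"name,required"` // must be supplied
	Phone string `schema:"phone"`
}

func main() {
	decoder := schema.NewDecoder()

	// "name" is present, so decoding succeeds.
	var p Person
	err := decoder.Decode(&p, url.Values{"name": {"Ada"}, "phone": {"555-0100"}})
	fmt.Println(p, err)

	// "name" is missing: the required check makes Decode return an error.
	var q Person
	err = decoder.Decode(&q, url.Values{"phone": {"555-0100"}})
	fmt.Println(err != nil) // true
}
```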

+ 22 - 14
vendor/github.com/gorilla/schema/cache.go

@@ -63,7 +63,7 @@ func (c *cache) parsePath(p string, t reflect.Type) ([]pathPart, error) {
 		}
 		// Valid field. Append index.
 		path = append(path, field.name)
-		if field.ss {
+		if field.isSliceOfStructs && (!field.unmarshalerInfo.IsValid || (field.unmarshalerInfo.IsValid && field.unmarshalerInfo.IsSliceElement)) {
 			// Parse a special case: slices of structs.
 			// i+1 must be the slice index.
 			//
@@ -142,7 +142,7 @@ func (c *cache) create(t reflect.Type, info *structInfo) *structInfo {
 				c.create(ft, info)
 				for _, fi := range info.fields[bef:len(info.fields)] {
 					// exclude required check because duplicated to embedded field
-					fi.required = false
+					fi.isRequired = false
 				}
 			}
 		}
@@ -162,6 +162,7 @@ func (c *cache) createField(field reflect.StructField, info *structInfo) {
 	// First let's get the basic type.
 	isSlice, isStruct := false, false
 	ft := field.Type
+	m := isTextUnmarshaler(reflect.Zero(ft))
 	if ft.Kind() == reflect.Ptr {
 		ft = ft.Elem()
 	}
@@ -185,12 +186,13 @@ func (c *cache) createField(field reflect.StructField, info *structInfo) {
 	}
 
 	info.fields = append(info.fields, &fieldInfo{
-		typ:      field.Type,
-		name:     field.Name,
-		ss:       isSlice && isStruct,
-		alias:    alias,
-		anon:     field.Anonymous,
-		required: options.Contains("required"),
+		typ:              field.Type,
+		name:             field.Name,
+		alias:            alias,
+		unmarshalerInfo:  m,
+		isSliceOfStructs: isSlice && isStruct,
+		isAnonymous:      field.Anonymous,
+		isRequired:       options.Contains("required"),
 	})
 }
 
@@ -215,12 +217,18 @@ func (i *structInfo) get(alias string) *fieldInfo {
 }
 
 type fieldInfo struct {
-	typ      reflect.Type
-	name     string // field name in the struct.
-	ss       bool   // true if this is a slice of structs.
-	alias    string
-	anon     bool // is an embedded field
-	required bool // tag option
+	typ reflect.Type
+	// name is the field name in the struct.
+	name  string
+	alias string
+	// unmarshalerInfo contains information regarding the
+	// encoding.TextUnmarshaler implementation of the field type.
+	unmarshalerInfo unmarshaler
+	// isSliceOfStructs indicates if the field type is a slice of structs.
+	isSliceOfStructs bool
+	// isAnonymous indicates whether the field is embedded in the struct.
+	isAnonymous bool
+	isRequired  bool
 }
 
 type pathPart struct {

+ 61 - 32
vendor/github.com/gorilla/schema/decoder.go

@@ -106,7 +106,7 @@ func (d *Decoder) checkRequired(t reflect.Type, src map[string][]string, prefix
 		if f.typ.Kind() == reflect.Struct {
 			err := d.checkRequired(f.typ, src, prefix+f.alias+".")
 			if err != nil {
-				if !f.anon {
+				if !f.isAnonymous {
 					return err
 				}
 				// check embedded parent field.
@@ -116,7 +116,7 @@ func (d *Decoder) checkRequired(t reflect.Type, src map[string][]string, prefix
 				}
 			}
 		}
-		if f.required {
+		if f.isRequired {
 			key := f.alias
 			if prefix != "" {
 				key = prefix + key
@@ -143,7 +143,6 @@ func isEmpty(t reflect.Type, value []string) bool {
 
 // decode fills a struct field using a parsed path.
 func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values []string) error {
-
 	// Get the field walking the struct fields by index.
 	for _, name := range parts[0].path {
 		if v.Type().Kind() == reflect.Ptr {
@@ -185,7 +184,8 @@ func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values
 
 	// Get the converter early in case there is one for a slice type.
 	conv := d.cache.converter(t)
-	if conv == nil && t.Kind() == reflect.Slice {
+	m := isTextUnmarshaler(v)
+	if conv == nil && t.Kind() == reflect.Slice && m.IsSliceElement {
 		var items []reflect.Value
 		elemT := t.Elem()
 		isPtrElem := elemT.Kind() == reflect.Ptr
@@ -209,9 +209,9 @@ func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values
 				if d.zeroEmpty {
 					items = append(items, reflect.Zero(elemT))
 				}
-			} else if m := isTextUnmarshaler(v); m.IsValid {
+			} else if m.IsValid {
 				u := reflect.New(elemT)
-				if m.IsPtr {
+				if m.IsSliceElementPtr {
 					u = reflect.New(reflect.PtrTo(elemT).Elem())
 				}
 				if err := u.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)); err != nil {
@@ -222,7 +222,7 @@ func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values
 						Err:   err,
 					}
 				}
-				if m.IsPtr {
+				if m.IsSliceElementPtr {
 					items = append(items, u.Elem().Addr())
 				} else if u.Kind() == reflect.Ptr {
 					items = append(items, u.Elem())
@@ -283,11 +283,7 @@ func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values
 			val = values[len(values)-1]
 		}
 
-		if val == "" {
-			if d.zeroEmpty {
-				v.Set(reflect.Zero(t))
-			}
-		} else if conv != nil {
+		if conv != nil {
 			if value := conv(val); value.IsValid() {
 				v.Set(value.Convert(t))
 			} else {
@@ -297,17 +293,34 @@ func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values
 					Index: -1,
 				}
 			}
-		} else if m := isTextUnmarshaler(v); m.IsValid {
-			// If the value implements the encoding.TextUnmarshaler interface
-			// apply UnmarshalText as the converter
-			if err := m.Unmarshaler.UnmarshalText([]byte(val)); err != nil {
-				return ConversionError{
-					Key:   path,
-					Type:  t,
-					Index: -1,
-					Err:   err,
+		} else if m.IsValid {
+			if m.IsPtr {
+				u := reflect.New(v.Type())
+				if err := u.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(val)); err != nil {
+					return ConversionError{
+						Key:   path,
+						Type:  t,
+						Index: -1,
+						Err:   err,
+					}
+				}
+				v.Set(reflect.Indirect(u))
+			} else {
+				// If the value implements the encoding.TextUnmarshaler interface
+				// apply UnmarshalText as the converter
+				if err := m.Unmarshaler.UnmarshalText([]byte(val)); err != nil {
+					return ConversionError{
+						Key:   path,
+						Type:  t,
+						Index: -1,
+						Err:   err,
+					}
 				}
 			}
+		} else if val == "" {
+			if d.zeroEmpty {
+				v.Set(reflect.Zero(t))
+			}
 		} else if conv := builtinConverters[t.Kind()]; conv != nil {
 			if value := conv(val); value.IsValid() {
 				v.Set(value.Convert(t))
@@ -326,18 +339,18 @@ func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values
 }
 
 func isTextUnmarshaler(v reflect.Value) unmarshaler {
-
 	// Create a new unmarshaller instance
 	m := unmarshaler{}
-
-	// As the UnmarshalText function should be applied
-	// to the pointer of the type, we convert the value to pointer.
-	if v.CanAddr() {
-		v = v.Addr()
-	}
 	if m.Unmarshaler, m.IsValid = v.Interface().(encoding.TextUnmarshaler); m.IsValid {
 		return m
 	}
+	// As the UnmarshalText function should be applied to the pointer of the
+	// type, we check that type to see if it implements the necessary
+	// method.
+	if m.Unmarshaler, m.IsValid = reflect.New(v.Type()).Interface().(encoding.TextUnmarshaler); m.IsValid {
+		m.IsPtr = true
+		return m
+	}
 
 	// if v is []T or *[]T create new T
 	t := v.Type()
@@ -345,11 +358,17 @@ func isTextUnmarshaler(v reflect.Value) unmarshaler {
 		t = t.Elem()
 	}
 	if t.Kind() == reflect.Slice {
-		// if t is a pointer slice, check if it implements encoding.TextUnmarshaler
+		// Check if the slice implements encoding.TextUnmarshaller
+		if m.Unmarshaler, m.IsValid = v.Interface().(encoding.TextUnmarshaler); m.IsValid {
+			return m
+		}
+		// If t is a pointer slice, check if its elements implement
+		// encoding.TextUnmarshaler
+		m.IsSliceElement = true
 		if t = t.Elem(); t.Kind() == reflect.Ptr {
 			t = reflect.PtrTo(t.Elem())
 			v = reflect.Zero(t)
-			m.IsPtr = true
+			m.IsSliceElementPtr = true
 			m.Unmarshaler, m.IsValid = v.Interface().(encoding.TextUnmarshaler)
 			return m
 		}
@@ -364,8 +383,18 @@ func isTextUnmarshaler(v reflect.Value) unmarshaler {
 // unmarshaller contains information about a TextUnmarshaler type
 type unmarshaler struct {
 	Unmarshaler encoding.TextUnmarshaler
-	IsPtr       bool
-	IsValid     bool
+	// IsValid indicates whether the resolved type indicated by the other
+	// flags implements the encoding.TextUnmarshaler interface.
+	IsValid bool
+	// IsPtr indicates that the resolved type is the pointer of the original
+	// type.
+	IsPtr bool
+	// IsSliceElement indicates that the resolved type is a slice element of
+	// the original type.
+	IsSliceElement bool
+	// IsSliceElementPtr indicates that the resolved type is a pointer to a
+	// slice element of the original type.
+	IsSliceElementPtr bool
 }
 
 // Errors ---------------------------------------------------------------------
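
The decoder changes center on `encoding.TextUnmarshaler`: a field whose pointer type implements it is now detected via the resolved type and decoded through `UnmarshalText` instead of a builtin converter. A sketch with a hypothetical `Hex` type, under the assumption that this vendored revision behaves as the diff above suggests.

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/gorilla/schema"
)

// Hex is a hypothetical field type that parses itself from a
// hexadecimal form value through encoding.TextUnmarshaler.
type Hex uint64

func (h *Hex) UnmarshalText(text []byte) error {
	v, err := strconv.ParseUint(string(text), 16, 64)
	if err != nil {
		return err
	}
	*h = Hex(v)
	return nil
}

type Query struct {
	ID Hex `schema:"id"`
}

func main() {
	var q Query
	// *Hex implements TextUnmarshaler, so "ff" goes through UnmarshalText.
	err := schema.NewDecoder().Decode(&q, map[string][]string{"id": {"ff"}})
	fmt.Println(q.ID, err) // 255 <nil>
}
```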

+ 1 - 1
vendor/github.com/gorilla/schema/doc.go

@@ -121,7 +121,7 @@ field, we could not translate multiple values to it if we did not use an
 index for the parent struct.
 
 There's also the possibility to create a custom type that implements the
-TextUnmarshaler interface, and in this case there's no need to registry
+TextUnmarshaler interface, and in this case there's no need to register
 a converter, like:
 
 	type Person struct {

+ 37 - 3
vendor/github.com/gorilla/schema/encoder.go

@@ -40,6 +40,34 @@ func (e *Encoder) SetAliasTag(tag string) {
 	e.cache.tag = tag
 }
 
+// isValidStructPointer test if input value is a valid struct pointer.
+func isValidStructPointer(v reflect.Value) bool {
+	return v.Type().Kind() == reflect.Ptr && v.Elem().IsValid() && v.Elem().Type().Kind() == reflect.Struct
+}
+
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Func:
+	case reflect.Map, reflect.Slice:
+		return v.IsNil() || v.Len() == 0
+	case reflect.Array:
+		z := true
+		for i := 0; i < v.Len(); i++ {
+			z = z && isZero(v.Index(i))
+		}
+		return z
+	case reflect.Struct:
+		z := true
+		for i := 0; i < v.NumField(); i++ {
+			z = z && isZero(v.Field(i))
+		}
+		return z
+	}
+	// Compare other types directly:
+	z := reflect.Zero(v.Type())
+	return v.Interface() == z.Interface()
+}
+
 func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error {
 	if v.Kind() == reflect.Ptr {
 		v = v.Elem()
@@ -57,8 +85,9 @@ func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error {
 			continue
 		}
 
-		if v.Field(i).Type().Kind() == reflect.Struct {
-			e.encode(v.Field(i), dst)
+		// Encode struct pointer types if the field is a valid pointer and a struct.
+		if isValidStructPointer(v.Field(i)) {
+			e.encode(v.Field(i).Elem(), dst)
 			continue
 		}
 
@@ -67,7 +96,7 @@ func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error {
 		// Encode non-slice types and custom implementations immediately.
 		if encFunc != nil {
 			value := encFunc(v.Field(i))
-			if opts.Contains("omitempty") && v.Field(i).Interface() == reflect.Zero(reflect.TypeOf(v.Field(i).Interface())).Interface() {
+			if opts.Contains("omitempty") && isZero(v.Field(i)) {
 				continue
 			}
 
@@ -75,6 +104,11 @@ func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error {
 			continue
 		}
 
+		if v.Field(i).Type().Kind() == reflect.Struct {
+			e.encode(v.Field(i), dst)
+			continue
+		}
+
 		if v.Field(i).Type().Kind() == reflect.Slice {
 			encFunc = typeEncoder(v.Field(i).Type().Elem(), e.regenc)
 		}
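
On the encoder side, `omitempty` now relies on the new `isZero` helper instead of a direct interface comparison against the zero value. A sketch of the visible effect; the struct and field names are illustrative.

```go
package main

import (
	"fmt"

	"github.com/gorilla/schema"
)

type Filter struct {
	Name string `schema:"name"`
	Note string `schema:"note,omitempty"` // dropped when it is the zero value ""
}

func main() {
	encoder := schema.NewEncoder()

	dst := map[string][]string{}
	// Note is empty, so the omitempty/isZero check skips it entirely.
	if err := encoder.Encode(Filter{Name: "books"}, dst); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dst) // map[name:[books]]
}
```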

+ 6 - 0
vendor/github.com/remogatto/slicediff/README.md

@@ -0,0 +1,6 @@
+# SliceDiff
+Get the diff from two slices.
+
+# License
+
+MIT

+ 41 - 0
vendor/github.com/remogatto/slicediff/slicediff.go

@@ -0,0 +1,41 @@
+package slicediff
+
+const (
+	Remove = iota + 1
+	Add
+	Update
+)
+
+type Action struct {
+	Id   int
+	Type int
+}
+
+type StringSlicer interface {
+	Strings() []string
+}
+
+var ActionString = [3]string{"REMOVE", "ADD", "UPDATE"}
+
+func Diff(dst StringSlicer, src StringSlicer) map[string]*Action {
+	actions := make(map[string]*Action)
+	m := make(map[string]bool)
+
+	for id, s := range dst.Strings() {
+		actions[s] = &Action{id, Remove}
+	}
+
+	for id, s := range src.Strings() {
+		if _, ok := m[s]; !ok {
+			m[s] = true
+			_, ok := actions[s]
+			if !ok {
+				actions[s] = &Action{id, Add}
+				continue
+			}
+			actions[s] = &Action{id, Update}
+		}
+	}
+
+	return actions
+}
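
`Diff` returns a map keyed by the string value, with `Action.Id` indexing the destination slice for removals and the source slice for additions and updates; `ActionString` is indexed with `Type-1` because the action constants start at 1. A short usage sketch with a trivial adapter type.

```go
package main

import (
	"fmt"
	"sort"

	"github.com/remogatto/slicediff"
)

// names adapts a plain string slice to slicediff's StringSlicer interface.
type names []string

func (n names) Strings() []string { return n }

func main() {
	dst := names{"alice", "bob"} // current state
	src := names{"bob", "carol"} // desired state

	actions := slicediff.Diff(dst, src)

	// Sort the keys so the output order is stable.
	keys := make([]string, 0, len(actions))
	for k := range actions {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		a := actions[k]
		fmt.Printf("%s -> %s (id %d)\n", k, slicediff.ActionString[a.Type-1], a.Id)
	}
	// Output:
	//   alice -> REMOVE (id 0)   id indexes dst
	//   bob -> UPDATE (id 0)     id indexes src
	//   carol -> ADD (id 1)      id indexes src
}
```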

+ 22 - 0
vendor/github.com/robfig/cron/.gitignore

@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe

+ 1 - 0
vendor/github.com/robfig/cron/.travis.yml

@@ -0,0 +1 @@
+language: go

+ 21 - 0
vendor/github.com/robfig/cron/LICENSE

@@ -0,0 +1,21 @@
+Copyright (C) 2012 Rob Figueiredo
+All Rights Reserved.
+
+MIT LICENSE
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 6 - 0
vendor/github.com/robfig/cron/README.md

@@ -0,0 +1,6 @@
+[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) 
+[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron)
+
+# cron
+
+Documentation here: https://godoc.org/github.com/robfig/cron

+ 27 - 0
vendor/github.com/robfig/cron/constantdelay.go

@@ -0,0 +1,27 @@
+package cron
+
+import "time"
+
+// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
+// It does not support jobs more frequent than once a second.
+type ConstantDelaySchedule struct {
+	Delay time.Duration
+}
+
+// Every returns a crontab Schedule that activates once every duration.
+// Delays of less than a second are not supported (will round up to 1 second).
+// Any fields less than a Second are truncated.
+func Every(duration time.Duration) ConstantDelaySchedule {
+	if duration < time.Second {
+		duration = time.Second
+	}
+	return ConstantDelaySchedule{
+		Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
+	}
+}
+
+// Next returns the next time this should be run.
+// This rounds so that the next activation time will be on the second.
+func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
+	return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
+}
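
`Every` truncates the delay to whole seconds and `Next` lands on second boundaries. A tiny sketch of both behaviors.

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron"
)

func main() {
	// Sub-second parts of the delay are dropped: 90s, not 90.5s.
	s := cron.Every(90*time.Second + 500*time.Millisecond)
	fmt.Println(s.Delay) // 1m30s

	now := time.Date(2020, time.March, 1, 12, 0, 0, 0, time.UTC)
	fmt.Println(s.Next(now)) // 2020-03-01 12:01:30 +0000 UTC
}
```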

+ 259 - 0
vendor/github.com/robfig/cron/cron.go

@@ -0,0 +1,259 @@
+package cron
+
+import (
+	"log"
+	"runtime"
+	"sort"
+	"time"
+)
+
+// Cron keeps track of any number of entries, invoking the associated func as
+// specified by the schedule. It may be started, stopped, and the entries may
+// be inspected while running.
+type Cron struct {
+	entries  []*Entry
+	stop     chan struct{}
+	add      chan *Entry
+	snapshot chan []*Entry
+	running  bool
+	ErrorLog *log.Logger
+	location *time.Location
+}
+
+// Job is an interface for submitted cron jobs.
+type Job interface {
+	Run()
+}
+
+// The Schedule describes a job's duty cycle.
+type Schedule interface {
+	// Return the next activation time, later than the given time.
+	// Next is invoked initially, and then each time the job is run.
+	Next(time.Time) time.Time
+}
+
+// Entry consists of a schedule and the func to execute on that schedule.
+type Entry struct {
+	// The schedule on which this job should be run.
+	Schedule Schedule
+
+	// The next time the job will run. This is the zero time if Cron has not been
+	// started or this entry's schedule is unsatisfiable
+	Next time.Time
+
+	// The last time this job was run. This is the zero time if the job has never
+	// been run.
+	Prev time.Time
+
+	// The Job to run.
+	Job Job
+}
+
+// byTime is a wrapper for sorting the entry array by time
+// (with zero time at the end).
+type byTime []*Entry
+
+func (s byTime) Len() int      { return len(s) }
+func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byTime) Less(i, j int) bool {
+	// Two zero times should return false.
+	// Otherwise, zero is "greater" than any other time.
+	// (To sort it at the end of the list.)
+	if s[i].Next.IsZero() {
+		return false
+	}
+	if s[j].Next.IsZero() {
+		return true
+	}
+	return s[i].Next.Before(s[j].Next)
+}
+
+// New returns a new Cron job runner, in the Local time zone.
+func New() *Cron {
+	return NewWithLocation(time.Now().Location())
+}
+
+// NewWithLocation returns a new Cron job runner.
+func NewWithLocation(location *time.Location) *Cron {
+	return &Cron{
+		entries:  nil,
+		add:      make(chan *Entry),
+		stop:     make(chan struct{}),
+		snapshot: make(chan []*Entry),
+		running:  false,
+		ErrorLog: nil,
+		location: location,
+	}
+}
+
+// A wrapper that turns a func() into a cron.Job
+type FuncJob func()
+
+func (f FuncJob) Run() { f() }
+
+// AddFunc adds a func to the Cron to be run on the given schedule.
+func (c *Cron) AddFunc(spec string, cmd func()) error {
+	return c.AddJob(spec, FuncJob(cmd))
+}
+
+// AddJob adds a Job to the Cron to be run on the given schedule.
+func (c *Cron) AddJob(spec string, cmd Job) error {
+	schedule, err := Parse(spec)
+	if err != nil {
+		return err
+	}
+	c.Schedule(schedule, cmd)
+	return nil
+}
+
+// Schedule adds a Job to the Cron to be run on the given schedule.
+func (c *Cron) Schedule(schedule Schedule, cmd Job) {
+	entry := &Entry{
+		Schedule: schedule,
+		Job:      cmd,
+	}
+	if !c.running {
+		c.entries = append(c.entries, entry)
+		return
+	}
+
+	c.add <- entry
+}
+
+// Entries returns a snapshot of the cron entries.
+func (c *Cron) Entries() []*Entry {
+	if c.running {
+		c.snapshot <- nil
+		x := <-c.snapshot
+		return x
+	}
+	return c.entrySnapshot()
+}
+
+// Location gets the time zone location
+func (c *Cron) Location() *time.Location {
+	return c.location
+}
+
+// Start the cron scheduler in its own go-routine, or no-op if already started.
+func (c *Cron) Start() {
+	if c.running {
+		return
+	}
+	c.running = true
+	go c.run()
+}
+
+// Run the cron scheduler, or no-op if already running.
+func (c *Cron) Run() {
+	if c.running {
+		return
+	}
+	c.running = true
+	c.run()
+}
+
+func (c *Cron) runWithRecovery(j Job) {
+	defer func() {
+		if r := recover(); r != nil {
+			const size = 64 << 10
+			buf := make([]byte, size)
+			buf = buf[:runtime.Stack(buf, false)]
+			c.logf("cron: panic running job: %v\n%s", r, buf)
+		}
+	}()
+	j.Run()
+}
+
+// Run the scheduler. this is private just due to the need to synchronize
+// access to the 'running' state variable.
+func (c *Cron) run() {
+	// Figure out the next activation times for each entry.
+	now := c.now()
+	for _, entry := range c.entries {
+		entry.Next = entry.Schedule.Next(now)
+	}
+
+	for {
+		// Determine the next entry to run.
+		sort.Sort(byTime(c.entries))
+
+		var timer *time.Timer
+		if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
+			// If there are no entries yet, just sleep - it still handles new entries
+			// and stop requests.
+			timer = time.NewTimer(100000 * time.Hour)
+		} else {
+			timer = time.NewTimer(c.entries[0].Next.Sub(now))
+		}
+
+		for {
+			select {
+			case now = <-timer.C:
+				now = now.In(c.location)
+				// Run every entry whose next time was less than now
+				for _, e := range c.entries {
+					if e.Next.After(now) || e.Next.IsZero() {
+						break
+					}
+					go c.runWithRecovery(e.Job)
+					e.Prev = e.Next
+					e.Next = e.Schedule.Next(now)
+				}
+
+			case newEntry := <-c.add:
+				timer.Stop()
+				now = c.now()
+				newEntry.Next = newEntry.Schedule.Next(now)
+				c.entries = append(c.entries, newEntry)
+
+			case <-c.snapshot:
+				c.snapshot <- c.entrySnapshot()
+				continue
+
+			case <-c.stop:
+				timer.Stop()
+				return
+			}
+
+			break
+		}
+	}
+}
+
+// Logs an error to stderr or to the configured error log
+func (c *Cron) logf(format string, args ...interface{}) {
+	if c.ErrorLog != nil {
+		c.ErrorLog.Printf(format, args...)
+	} else {
+		log.Printf(format, args...)
+	}
+}
+
+// Stop stops the cron scheduler if it is running; otherwise it does nothing.
+func (c *Cron) Stop() {
+	if !c.running {
+		return
+	}
+	c.stop <- struct{}{}
+	c.running = false
+}
+
+// entrySnapshot returns a copy of the current cron entry list.
+func (c *Cron) entrySnapshot() []*Entry {
+	entries := []*Entry{}
+	for _, e := range c.entries {
+		entries = append(entries, &Entry{
+			Schedule: e.Schedule,
+			Next:     e.Next,
+			Prev:     e.Prev,
+			Job:      e.Job,
+		})
+	}
+	return entries
+}
+
+// now returns current time in c location
+func (c *Cron) now() time.Time {
+	return time.Now().In(c.location)
+}
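
Besides `AddFunc`/`AddJob`, the runner accepts any `Schedule` implementation through `Schedule`, `FuncJob` adapts a bare `func()` to the `Job` interface, and `NewWithLocation` pins the scheduler to a time zone. A sketch combining the three.

```go
package main

import (
	"log"
	"time"

	"github.com/robfig/cron"
)

func main() {
	// Run the scheduler in UTC instead of the local time zone.
	c := cron.NewWithLocation(time.UTC)

	// FuncJob turns a plain func() into a Job; Schedule accepts any
	// Schedule implementation, here a constant 30-second delay.
	c.Schedule(cron.Every(30*time.Second), cron.FuncJob(func() {
		log.Println("tick")
	}))

	c.Start()
	defer c.Stop()

	time.Sleep(65 * time.Second) // lets the job fire a couple of times
}
```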

+ 129 - 0
vendor/github.com/robfig/cron/doc.go

@@ -0,0 +1,129 @@
+/*
+Package cron implements a cron spec parser and job runner.
+
+Usage
+
+Callers may register Funcs to be invoked on a given schedule.  Cron will run
+them in their own goroutines.
+
+	c := cron.New()
+	c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") })
+	c.AddFunc("@hourly",      func() { fmt.Println("Every hour") })
+	c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") })
+	c.Start()
+	..
+	// Funcs are invoked in their own goroutine, asynchronously.
+	...
+	// Funcs may also be added to a running Cron
+	c.AddFunc("@daily", func() { fmt.Println("Every day") })
+	..
+	// Inspect the cron job entries' next and previous run times.
+	inspect(c.Entries())
+	..
+	c.Stop()  // Stop the scheduler (does not stop any jobs already running).
+
+CRON Expression Format
+
+A cron expression represents a set of times, using 6 space-separated fields.
+
+	Field name   | Mandatory? | Allowed values  | Allowed special characters
+	----------   | ---------- | --------------  | --------------------------
+	Seconds      | Yes        | 0-59            | * / , -
+	Minutes      | Yes        | 0-59            | * / , -
+	Hours        | Yes        | 0-23            | * / , -
+	Day of month | Yes        | 1-31            | * / , - ?
+	Month        | Yes        | 1-12 or JAN-DEC | * / , -
+	Day of week  | Yes        | 0-6 or SUN-SAT  | * / , - ?
+
+Note: Month and Day-of-week field values are case insensitive.  "SUN", "Sun",
+and "sun" are equally accepted.
+
+Special Characters
+
+Asterisk ( * )
+
+The asterisk indicates that the cron expression will match for all values of the
+field; e.g., using an asterisk in the 5th field (month) would indicate every
+month.
+
+Slash ( / )
+
+Slashes are used to describe increments of ranges. For example 3-59/15 in the
+1st field (minutes) would indicate the 3rd minute of the hour and every 15
+minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
+that is, an increment over the largest possible range of the field.  The form
+"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
+increment until the end of that specific range.  It does not wrap around.
+
+Comma ( , )
+
+Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
+the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
+
+Hyphen ( - )
+
+Hyphens are used to define ranges. For example, 9-17 would indicate every
+hour between 9am and 5pm inclusive.
+
+Question mark ( ? )
+
+Question mark may be used instead of '*' for leaving either day-of-month or
+day-of-week blank.
+
+Predefined schedules
+
+You may use one of several pre-defined schedules in place of a cron expression.
+
+	Entry                  | Description                                | Equivalent To
+	-----                  | -----------                                | -------------
+	@yearly (or @annually) | Run once a year, midnight, Jan. 1st        | 0 0 0 1 1 *
+	@monthly               | Run once a month, midnight, first of month | 0 0 0 1 * *
+	@weekly                | Run once a week, midnight between Sat/Sun  | 0 0 0 * * 0
+	@daily (or @midnight)  | Run once a day, midnight                   | 0 0 0 * * *
+	@hourly                | Run once an hour, beginning of hour        | 0 0 * * * *
+
+Intervals
+
+You may also schedule a job to execute at fixed intervals, starting at the time it's added 
+or cron is run. This is supported by formatting the cron spec like this:
+
+    @every <duration>
+
+where "duration" is a string accepted by time.ParseDuration
+(http://golang.org/pkg/time/#ParseDuration).
+
+For example, "@every 1h30m10s" would indicate a schedule that activates after
+1 hour, 30 minutes, 10 seconds, and then every interval after that.
+
+Note: The interval does not take the job runtime into account.  For example,
+if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
+it will have only 2 minutes of idle time between each run.
+
+Time zones
+
+All interpretation and scheduling is done in the machine's local time zone (as
+provided by the Go time package (http://www.golang.org/pkg/time).
+
+Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
+not be run!
+
+Thread safety
+
+Since the Cron service runs concurrently with the calling code, some amount of
+care must be taken to ensure proper synchronization.
+
+All cron methods are designed to be correctly synchronized as long as the caller
+ensures that invocations have a clear happens-before ordering between them.
+
+Implementation
+
+Cron entries are stored in an array, sorted by their next activation time.  Cron
+sleeps until the next job is due to be run.
+
+Upon waking:
+ - it runs each entry that is active on that second
+ - it calculates the next run times for the jobs that were run
+ - it re-sorts the array of entries by next activation time.
+ - it goes to sleep until the soonest job.
+*/
+package cron

+ 380 - 0
vendor/github.com/robfig/cron/parser.go

@@ -0,0 +1,380 @@
+package cron
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Configuration options for creating a parser. Most options specify which
+// fields should be included, while others enable features. If a field is not
+// included the parser will assume a default value. These options do not change
+// the order fields are parse in.
+type ParseOption int
+
+const (
+	Second      ParseOption = 1 << iota // Seconds field, default 0
+	Minute                              // Minutes field, default 0
+	Hour                                // Hours field, default 0
+	Dom                                 // Day of month field, default *
+	Month                               // Month field, default *
+	Dow                                 // Day of week field, default *
+	DowOptional                         // Optional day of week field, default *
+	Descriptor                          // Allow descriptors such as @monthly, @weekly, etc.
+)
+
+var places = []ParseOption{
+	Second,
+	Minute,
+	Hour,
+	Dom,
+	Month,
+	Dow,
+}
+
+var defaults = []string{
+	"0",
+	"0",
+	"0",
+	"*",
+	"*",
+	"*",
+}
+
+// A custom Parser that can be configured.
+type Parser struct {
+	options   ParseOption
+	optionals int
+}
+
+// Creates a custom Parser with custom options.
+//
+//  // Standard parser without descriptors
+//  specParser := NewParser(Minute | Hour | Dom | Month | Dow)
+//  sched, err := specParser.Parse("0 0 15 */3 *")
+//
+//  // Same as above, just excludes time fields
+//  subsParser := NewParser(Dom | Month | Dow)
+//  sched, err := specParser.Parse("15 */3 *")
+//
+//  // Same as above, just makes Dow optional
+//  subsParser := NewParser(Dom | Month | DowOptional)
+//  sched, err := specParser.Parse("15 */3")
+//
+func NewParser(options ParseOption) Parser {
+	optionals := 0
+	if options&DowOptional > 0 {
+		options |= Dow
+		optionals++
+	}
+	return Parser{options, optionals}
+}
+
+// Parse returns a new crontab schedule representing the given spec.
+// It returns a descriptive error if the spec is not valid.
+// It accepts crontab specs and features configured by NewParser.
+func (p Parser) Parse(spec string) (Schedule, error) {
+	if len(spec) == 0 {
+		return nil, fmt.Errorf("Empty spec string")
+	}
+	if spec[0] == '@' && p.options&Descriptor > 0 {
+		return parseDescriptor(spec)
+	}
+
+	// Figure out how many fields we need
+	max := 0
+	for _, place := range places {
+		if p.options&place > 0 {
+			max++
+		}
+	}
+	min := max - p.optionals
+
+	// Split fields on whitespace
+	fields := strings.Fields(spec)
+
+	// Validate number of fields
+	if count := len(fields); count < min || count > max {
+		if min == max {
+			return nil, fmt.Errorf("Expected exactly %d fields, found %d: %s", min, count, spec)
+		}
+		return nil, fmt.Errorf("Expected %d to %d fields, found %d: %s", min, max, count, spec)
+	}
+
+	// Fill in missing fields
+	fields = expandFields(fields, p.options)
+
+	var err error
+	field := func(field string, r bounds) uint64 {
+		if err != nil {
+			return 0
+		}
+		var bits uint64
+		bits, err = getField(field, r)
+		return bits
+	}
+
+	var (
+		second     = field(fields[0], seconds)
+		minute     = field(fields[1], minutes)
+		hour       = field(fields[2], hours)
+		dayofmonth = field(fields[3], dom)
+		month      = field(fields[4], months)
+		dayofweek  = field(fields[5], dow)
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return &SpecSchedule{
+		Second: second,
+		Minute: minute,
+		Hour:   hour,
+		Dom:    dayofmonth,
+		Month:  month,
+		Dow:    dayofweek,
+	}, nil
+}
+
+func expandFields(fields []string, options ParseOption) []string {
+	n := 0
+	count := len(fields)
+	expFields := make([]string, len(places))
+	copy(expFields, defaults)
+	for i, place := range places {
+		if options&place > 0 {
+			expFields[i] = fields[n]
+			n++
+		}
+		if n == count {
+			break
+		}
+	}
+	return expFields
+}
+
+var standardParser = NewParser(
+	Minute | Hour | Dom | Month | Dow | Descriptor,
+)
+
+// ParseStandard returns a new crontab schedule representing the given standardSpec
+// (https://en.wikipedia.org/wiki/Cron). It differs from Parse requiring to always
+// pass 5 entries representing: minute, hour, day of month, month and day of week,
+// in that order. It returns a descriptive error if the spec is not valid.
+//
+// It accepts
+//   - Standard crontab specs, e.g. "* * * * ?"
+//   - Descriptors, e.g. "@midnight", "@every 1h30m"
+func ParseStandard(standardSpec string) (Schedule, error) {
+	return standardParser.Parse(standardSpec)
+}
+
+var defaultParser = NewParser(
+	Second | Minute | Hour | Dom | Month | DowOptional | Descriptor,
+)
+
+// Parse returns a new crontab schedule representing the given spec.
+// It returns a descriptive error if the spec is not valid.
+//
+// It accepts
+//   - Full crontab specs, e.g. "* * * * * ?"
+//   - Descriptors, e.g. "@midnight", "@every 1h30m"
+func Parse(spec string) (Schedule, error) {
+	return defaultParser.Parse(spec)
+}
+
+// getField returns an Int with the bits set representing all of the times that
+// the field represents or error parsing field value.  A "field" is a comma-separated
+// list of "ranges".
+func getField(field string, r bounds) (uint64, error) {
+	var bits uint64
+	ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
+	for _, expr := range ranges {
+		bit, err := getRange(expr, r)
+		if err != nil {
+			return bits, err
+		}
+		bits |= bit
+	}
+	return bits, nil
+}
+
+// getRange returns the bits indicated by the given expression:
+//   number | number "-" number [ "/" number ]
+// or error parsing range.
+func getRange(expr string, r bounds) (uint64, error) {
+	var (
+		start, end, step uint
+		rangeAndStep     = strings.Split(expr, "/")
+		lowAndHigh       = strings.Split(rangeAndStep[0], "-")
+		singleDigit      = len(lowAndHigh) == 1
+		err              error
+	)
+
+	var extra uint64
+	if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
+		start = r.min
+		end = r.max
+		extra = starBit
+	} else {
+		start, err = parseIntOrName(lowAndHigh[0], r.names)
+		if err != nil {
+			return 0, err
+		}
+		switch len(lowAndHigh) {
+		case 1:
+			end = start
+		case 2:
+			end, err = parseIntOrName(lowAndHigh[1], r.names)
+			if err != nil {
+				return 0, err
+			}
+		default:
+			return 0, fmt.Errorf("Too many hyphens: %s", expr)
+		}
+	}
+
+	switch len(rangeAndStep) {
+	case 1:
+		step = 1
+	case 2:
+		step, err = mustParseInt(rangeAndStep[1])
+		if err != nil {
+			return 0, err
+		}
+
+		// Special handling: "N/step" means "N-max/step".
+		if singleDigit {
+			end = r.max
+		}
+	default:
+		return 0, fmt.Errorf("Too many slashes: %s", expr)
+	}
+
+	if start < r.min {
+		return 0, fmt.Errorf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
+	}
+	if end > r.max {
+		return 0, fmt.Errorf("End of range (%d) above maximum (%d): %s", end, r.max, expr)
+	}
+	if start > end {
+		return 0, fmt.Errorf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
+	}
+	if step == 0 {
+		return 0, fmt.Errorf("Step of range should be a positive number: %s", expr)
+	}
+
+	return getBits(start, end, step) | extra, nil
+}
+
+// parseIntOrName returns the (possibly-named) integer contained in expr.
+func parseIntOrName(expr string, names map[string]uint) (uint, error) {
+	if names != nil {
+		if namedInt, ok := names[strings.ToLower(expr)]; ok {
+			return namedInt, nil
+		}
+	}
+	return mustParseInt(expr)
+}
+
+// mustParseInt parses the given expression as an int or returns an error.
+func mustParseInt(expr string) (uint, error) {
+	num, err := strconv.Atoi(expr)
+	if err != nil {
+		return 0, fmt.Errorf("Failed to parse int from %s: %s", expr, err)
+	}
+	if num < 0 {
+		return 0, fmt.Errorf("Negative number (%d) not allowed: %s", num, expr)
+	}
+
+	return uint(num), nil
+}
+
+// getBits sets all bits in the range [min, max], modulo the given step size.
+func getBits(min, max, step uint) uint64 {
+	var bits uint64
+
+	// If step is 1, use shifts.
+	if step == 1 {
+		return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
+	}
+
+	// Else, use a simple loop.
+	for i := min; i <= max; i += step {
+		bits |= 1 << i
+	}
+	return bits
+}
+
+// all returns all bits within the given bounds.  (plus the star bit)
+func all(r bounds) uint64 {
+	return getBits(r.min, r.max, 1) | starBit
+}
+
+// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
+func parseDescriptor(descriptor string) (Schedule, error) {
+	switch descriptor {
+	case "@yearly", "@annually":
+		return &SpecSchedule{
+			Second: 1 << seconds.min,
+			Minute: 1 << minutes.min,
+			Hour:   1 << hours.min,
+			Dom:    1 << dom.min,
+			Month:  1 << months.min,
+			Dow:    all(dow),
+		}, nil
+
+	case "@monthly":
+		return &SpecSchedule{
+			Second: 1 << seconds.min,
+			Minute: 1 << minutes.min,
+			Hour:   1 << hours.min,
+			Dom:    1 << dom.min,
+			Month:  all(months),
+			Dow:    all(dow),
+		}, nil
+
+	case "@weekly":
+		return &SpecSchedule{
+			Second: 1 << seconds.min,
+			Minute: 1 << minutes.min,
+			Hour:   1 << hours.min,
+			Dom:    all(dom),
+			Month:  all(months),
+			Dow:    1 << dow.min,
+		}, nil
+
+	case "@daily", "@midnight":
+		return &SpecSchedule{
+			Second: 1 << seconds.min,
+			Minute: 1 << minutes.min,
+			Hour:   1 << hours.min,
+			Dom:    all(dom),
+			Month:  all(months),
+			Dow:    all(dow),
+		}, nil
+
+	case "@hourly":
+		return &SpecSchedule{
+			Second: 1 << seconds.min,
+			Minute: 1 << minutes.min,
+			Hour:   all(hours),
+			Dom:    all(dom),
+			Month:  all(months),
+			Dow:    all(dow),
+		}, nil
+	}
+
+	const every = "@every "
+	if strings.HasPrefix(descriptor, every) {
+		duration, err := time.ParseDuration(descriptor[len(every):])
+		if err != nil {
+			return nil, fmt.Errorf("Failed to parse duration %s: %s", descriptor, err)
+		}
+		return Every(duration), nil
+	}
+
+	return nil, fmt.Errorf("Unrecognized descriptor: %s", descriptor)
+}
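
The package ships two ready-made parsers: `Parse` expects the 6-field form with seconds, `ParseStandard` the classic 5-field crontab form, and `NewParser` builds custom variants. A sketch comparing them; the example spec and date are arbitrary.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/robfig/cron"
)

func main() {
	// Default parser: 6 fields, seconds first.
	sixField, err := cron.Parse("0 30 8 * * MON-FRI")
	if err != nil {
		log.Fatal(err)
	}

	// Standard crontab parser: 5 fields, minutes first.
	fiveField, err := cron.ParseStandard("30 8 * * MON-FRI")
	if err != nil {
		log.Fatal(err)
	}

	now := time.Date(2020, time.March, 1, 0, 0, 0, 0, time.UTC) // a Sunday
	fmt.Println(sixField.Next(now))  // 2020-03-02 08:30:00 +0000 UTC
	fmt.Println(fiveField.Next(now)) // 2020-03-02 08:30:00 +0000 UTC

	// A custom parser that only understands minute/hour/dom/month/dow.
	p := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
	if _, err := p.Parse("30 8 * * 1-5"); err != nil {
		log.Fatal(err)
	}
}
```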

+ 158 - 0
vendor/github.com/robfig/cron/spec.go

@@ -0,0 +1,158 @@
+package cron
+
+import "time"
+
+// SpecSchedule specifies a duty cycle (to the second granularity), based on a
+// traditional crontab specification. It is computed initially and stored as bit sets.
+type SpecSchedule struct {
+	Second, Minute, Hour, Dom, Month, Dow uint64
+}
+
+// bounds provides a range of acceptable values (plus a map of name to value).
+type bounds struct {
+	min, max uint
+	names    map[string]uint
+}
+
+// The bounds for each field.
+var (
+	seconds = bounds{0, 59, nil}
+	minutes = bounds{0, 59, nil}
+	hours   = bounds{0, 23, nil}
+	dom     = bounds{1, 31, nil}
+	months  = bounds{1, 12, map[string]uint{
+		"jan": 1,
+		"feb": 2,
+		"mar": 3,
+		"apr": 4,
+		"may": 5,
+		"jun": 6,
+		"jul": 7,
+		"aug": 8,
+		"sep": 9,
+		"oct": 10,
+		"nov": 11,
+		"dec": 12,
+	}}
+	dow = bounds{0, 6, map[string]uint{
+		"sun": 0,
+		"mon": 1,
+		"tue": 2,
+		"wed": 3,
+		"thu": 4,
+		"fri": 5,
+		"sat": 6,
+	}}
+)
+
+const (
+	// Set the top bit if a star was included in the expression.
+	starBit = 1 << 63
+)
+
+// Next returns the next time this schedule is activated, greater than the given
+// time.  If no time can be found to satisfy the schedule, return the zero time.
+func (s *SpecSchedule) Next(t time.Time) time.Time {
+	// General approach:
+	// For Month, Day, Hour, Minute, Second:
+	// Check if the time value matches.  If yes, continue to the next field.
+	// If the field doesn't match the schedule, then increment the field until it matches.
+	// While incrementing the field, a wrap-around brings it back to the beginning
+	// of the field list (since it is necessary to re-verify previous field
+	// values)
+
+	// Start at the earliest possible time (the upcoming second).
+	t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
+
+	// This flag indicates whether a field has been incremented.
+	added := false
+
+	// If no time is found within five years, return zero.
+	yearLimit := t.Year() + 5
+
+WRAP:
+	if t.Year() > yearLimit {
+		return time.Time{}
+	}
+
+	// Find the first applicable month.
+	// If it's this month, then do nothing.
+	for 1<<uint(t.Month())&s.Month == 0 {
+		// If we have to add a month, reset the other parts to 0.
+		if !added {
+			added = true
+			// Otherwise, set the date at the beginning (since the current time is irrelevant).
+			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 1, 0)
+
+		// Wrapped around.
+		if t.Month() == time.January {
+			goto WRAP
+		}
+	}
+
+	// Now get a day in that month.
+	for !dayMatches(s, t) {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 0, 1)
+
+		if t.Day() == 1 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Hour())&s.Hour == 0 {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
+		}
+		t = t.Add(1 * time.Hour)
+
+		if t.Hour() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Minute())&s.Minute == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Minute)
+		}
+		t = t.Add(1 * time.Minute)
+
+		if t.Minute() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Second())&s.Second == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Second)
+		}
+		t = t.Add(1 * time.Second)
+
+		if t.Second() == 0 {
+			goto WRAP
+		}
+	}
+
+	return t
+}
+
+// dayMatches returns true if the schedule's day-of-week and day-of-month
+// restrictions are satisfied by the given time.
+func dayMatches(s *SpecSchedule, t time.Time) bool {
+	var (
+		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
+		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
+	)
+	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
+		return domMatch && dowMatch
+	}
+	return domMatch || dowMatch
+}
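
SpecSchedule stores every field as a 64-bit set, and Next/dayMatches only ever test single bits against those sets. A small standalone sketch of that representation; bitsForRange is an illustrative re-implementation of the package's unexported getBits helper, not the vendored code itself:

    package main

    import "fmt"

    // bitsForRange mirrors getBits: set every bit in [min, max], stepping by step.
    func bitsForRange(min, max, step uint) uint64 {
        var bits uint64
        for i := min; i <= max; i += step {
            bits |= 1 << i
        }
        return bits
    }

    func main() {
        // The Minute set for a "*/15" field: bits 0, 15, 30 and 45.
        minutes := bitsForRange(0, 59, 15)

        // Next tests a candidate value exactly like this:
        fmt.Println("minute 37 matches:", (uint64(1)<<37)&minutes != 0) // false
        fmt.Println("minute 45 matches:", (uint64(1)<<45)&minutes != 0) // true
    }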

+ 1 - 0
vendor/gogs.carducci-dante.gov.it/karmen/client/.gitignore

@@ -0,0 +1 @@
+*~


+ 475 - 0
vendor/gogs.carducci-dante.gov.it/karmen/client/client.go

@@ -0,0 +1,475 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+
+	"gogs.carducci-dante.gov.it/karmen/core/orm"
+	"gogs.carducci-dante.gov.it/karmen/core/renderer"
+)
+
+// A client represents a client connection to the Headmaster test
+// server.
+type Client struct {
+	Url      *url.URL
+	Username string
+	Password string
+	User     string
+
+	token string
+}
+
+// Dial connects to a test server instance at the specified address
+// using the given credentials.
+func Dial(host, username, password string) (*Client, error) {
+	url, err := url.Parse(host)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &Client{
+		Url:      url,
+		Username: username,
+		Password: password,
+	}
+
+	response, err := client.SendRequest("GET", "get_token", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var data struct {
+		Token string
+		User  string
+	}
+	if err := json.Unmarshal(response, &data); err != nil {
+		return nil, err
+	}
+	client.token = data.Token
+	client.User = data.User
+
+	return client, nil
+}
+
+func (c *Client) SendRequest(method string, path string, data []byte) ([]byte, error) {
+	// Build the HTTP request and attach the auth headers
+
+	folderUrl, err := url.Parse(path)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{}
+	req, err := http.NewRequest(method, c.Url.ResolveReference(folderUrl).String(), bytes.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.SetBasicAuth(c.Username, c.Password)
+
+	if c.token != "" {
+		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token))
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	return body, nil
+}
+
+func (c *Client) GetTeachers() ([]*orm.Teacher, error) {
+	var (
+		response renderer.JsonResponse
+		teachers []*orm.Teacher
+	)
+
+	data, err := c.SendRequest("GET", "/api/teachers?format=json", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := json.Unmarshal(data, &response); err != nil {
+		return nil, err
+	}
+
+	if string(response.Error) != "" {
+		return nil, errors.New(string(response.Error))
+	}
+
+	if err := json.Unmarshal(response.Result, &teachers); err != nil {
+		return nil, err
+	}
+	return teachers, nil
+}
+
+func (c *Client) GetTeacher(id uint) (*orm.Teacher, error) {
+	var (
+		response renderer.JsonResponse
+		teacher  *orm.Teacher
+	)
+
+	data, err := c.SendRequest("GET", fmt.Sprintf("/api/teachers/%d?format=json", id), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := json.Unmarshal(data, &response); err != nil {
+		return nil, err
+	}
+
+	if string(response.Error) != "" {
+		return nil, errors.New(string(response.Error))
+	}
+
+	if err := json.Unmarshal(response.Result, &teacher); err != nil {
+		return nil, err
+	}
+	return teacher, nil
+}
+
+func (c *Client) GetStudents() ([]*orm.Student, error) {
+	var (
+		response renderer.JsonResponse
+		students []*orm.Student
+	)
+
+	data, err := c.SendRequest("GET", "/api/students?format=json", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := json.Unmarshal(data, &response); err != nil {
+		return nil, err
+	}
+
+	if string(response.Error) != "" {
+		return nil, errors.New(string(response.Error))
+	}
+
+	if err := json.Unmarshal(response.Result, &students); err != nil {
+		return nil, err
+	}
+	return students, nil
+}
+
+func (c *Client) GetSubjects() ([]*orm.Subject, error) {
+	var (
+		response renderer.JsonResponse
+		subjects []*orm.Subject
+	)
+
+	data, err := c.SendRequest("GET", "/api/subjects?format=json", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := json.Unmarshal(data, &response); err != nil {
+		return nil, err
+	}
+
+	if string(response.Error) != "" {
+		return nil, errors.New(string(response.Error))
+	}
+
+	if err := json.Unmarshal(response.Result, &subjects); err != nil {
+		return nil, err
+	}
+	return subjects, nil
+}
+
+func (c *Client) AddSubject(subject *orm.Subject) error {
+	var response renderer.JsonResponse
+
+	data, err := json.Marshal(subject)
+	if err != nil {
+		return err
+	}
+	resp, err := c.SendRequest("POST", "/api/subjects/add/?format=json", data)
+	if err != nil {
+		return err
+	}
+	if err := json.Unmarshal(resp, &response); err != nil {
+		return err
+	}
+	if string(response.Error) != "" {
+		return errors.New(string(response.Error))
+	}
+	return nil
+}
+
+func (c *Client) GetDepartments() ([]*orm.Department, error) {
+	var (
+		response    renderer.JsonResponse
+		departments []*orm.Department
+	)
+
+	data, err := c.SendRequest("GET", "/api/departments?format=json", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := json.Unmarshal(data, &response); err != nil {
+		return nil, err
+	}
+
+	if string(response.Error) != "" {
+		return nil, errors.New(string(response.Error))
+	}
+
+	if err := json.Unmarshal(response.Result, &departments); err != nil {
+		return nil, err
+	}
+	return departments, nil
+}
+
+func (c *Client) GetClasses() ([]*orm.Class, error) {
+	var (
+		response renderer.JsonResponse
+		classes  []*orm.Class
+	)
+
+	data, err := c.SendRequest("GET", "/api/classes?format=json", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := json.Unmarshal(data, &response); err != nil {
+		return nil, err
+	}
+
+	if string(response.Error) != "" {
+		return nil, errors.New(string(response.Error))
+	}
+
+	if err := json.Unmarshal(response.Result, &classes); err != nil {
+		return nil, err
+	}
+	return classes, nil
+}
+
+func (c *Client) GetClass(id uint) (*orm.Class, error) {
+	var (
+		response renderer.JsonResponse
+		class    *orm.Class
+	)
+
+	data, err := c.SendRequest("GET", fmt.Sprintf("/api/classes/%d?format=json", id), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := json.Unmarshal(data, &response); err != nil {
+		return nil, err
+	}
+
+	if string(response.Error) != "" {
+		return nil, errors.New(string(response.Error))
+	}
+
+	if err := json.Unmarshal(response.Result, &class); err != nil {
+		return nil, err
+	}
+	return class, nil
+}
+
+func (c *Client) AddClass(class *orm.Class) error {
+	var response renderer.JsonResponse
+
+	data, err := json.Marshal(class)
+	if err != nil {
+		return err
+	}
+	resp, err := c.SendRequest("POST", "/api/classes/add/?format=json", data)
+	if err != nil {
+		return err
+	}
+	if err := json.Unmarshal(resp, &response); err != nil {
+		return err
+	}
+	if string(response.Error) != "" {
+		return errors.New(string(response.Error))
+	}
+	return nil
+}
+
+func (c *Client) GetActivities() ([]*orm.Activity, error) {
+	var (
+		response   renderer.JsonResponse
+		activities []*orm.Activity
+	)
+
+	data, err := c.SendRequest("GET", "/api/activities?format=json", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := json.Unmarshal(data, &response); err != nil {
+		return nil, err
+	}
+
+	if string(response.Error) != "" {
+		return nil, errors.New(string(response.Error))
+	}
+
+	if err := json.Unmarshal(response.Result, &activities); err != nil {
+		return nil, err
+	}
+	return activities, nil
+}
+
+func (c *Client) AddStudent(student *orm.Student) (uint, error) {
+	var response renderer.JsonResponse
+
+	data, err := json.Marshal(student)
+	if err != nil {
+		return 0, err
+	}
+	resp, err := c.SendRequest("POST", "/api/students/add/?format=json", data)
+	if err != nil {
+		return 0, err
+	}
+	if err := json.Unmarshal(resp, &response); err != nil {
+		return 0, err
+	}
+	if string(response.Error) != "" {
+		return 0, errors.New(string(response.Error))
+	}
+
+	id, err := strconv.Atoi(string(response.Result))
+	if err != nil {
+		return 0, err
+	}
+	return uint(id), nil
+}
+
+func (c *Client) AddActivity(activity *orm.Activity) (uint, error) {
+	var response renderer.JsonResponse
+	data, err := json.Marshal(activity)
+	if err != nil {
+		return 0, err
+	}
+	resp, err := c.SendRequest("POST", "/api/activities/add/?format=json", data)
+	if err != nil {
+		return 0, err
+	}
+
+	if err := json.Unmarshal(resp, &response); err != nil {
+		return 0, err
+	}
+	if string(response.Error) != "" {
+		return 0, errors.New(string(response.Error))
+	}
+
+	id, err := strconv.Atoi(string(response.Result))
+	if err != nil {
+		return 0, err
+	}
+
+	return uint(id), nil
+}
+
+func (c *Client) DeleteActivity(activity *orm.Activity) (uint, error) {
+	var response renderer.JsonResponse
+	data, err := json.Marshal(activity)
+	if err != nil {
+		return 0, err
+	}
+	resp, err := c.SendRequest("DELETE", fmt.Sprintf("/api/activities/%d/delete?format=json", activity.ID), data)
+	if err != nil {
+		return 0, err
+	}
+
+	if err := json.Unmarshal(resp, &response); err != nil {
+		return 0, err
+	}
+	if string(response.Error) != "" {
+		return 0, errors.New(string(response.Error))
+	}
+
+	id, err := strconv.Atoi(string(response.Result))
+	if err != nil {
+		return 0, err
+	}
+
+	return uint(id), nil
+}
+
+func (c *Client) UpdateActivity(activity *orm.Activity) error {
+	data, err := json.Marshal(activity)
+	if err != nil {
+		return err
+
+	}
+	_, err = c.SendRequest("POST", fmt.Sprintf("/api/activities/%d/update?format=json", activity.ID), data)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *Client) UpdateStudent(student *orm.Student) error {
+	data, err := json.Marshal(student)
+	if err != nil {
+		return err
+
+	}
+	_, err = c.SendRequest("POST", fmt.Sprintf("/api/students/%d/update?format=json", student.ID), data)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *Client) UpdateTeacher(teacher *orm.Teacher) error {
+	var response renderer.JsonResponse
+
+	data, err := json.Marshal(teacher)
+	if err != nil {
+		return err
+	}
+	resp, err := c.SendRequest("POST", fmt.Sprintf("/api/teachers/%d/update?format=json", teacher.ID), data)
+	if err != nil {
+		return err
+	}
+	if err := json.Unmarshal(resp, &response); err != nil {
+		return err
+	}
+	if string(response.Error) != "" {
+		return errors.New(string(response.Error))
+	}
+	return nil
+}
+
+func (c *Client) AddTeacher(teacher *orm.Teacher) error {
+	var response renderer.JsonResponse
+
+	data, err := json.Marshal(teacher)
+	if err != nil {
+		return err
+	}
+	resp, err := c.SendRequest("POST", "/api/teachers/add/?format=json", data)
+	if err != nil {
+		return err
+	}
+	if err := json.Unmarshal(resp, &response); err != nil {
+		return err
+	}
+	if string(response.Error) != "" {
+		return errors.New(string(response.Error))
+	}
+	return nil
+}
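
Taken together, Dial and the Get*/Add*/Update* helpers are all a sync job needs to talk to the karmen API. A short usage sketch grounded in the functions above; the host and credentials are placeholders:

    package main

    import (
        "log"

        karmen "gogs.carducci-dante.gov.it/karmen/client"
    )

    func main() {
        // Dial authenticates with basic auth, fetches a bearer token and
        // returns a ready-to-use client.
        client, err := karmen.Dial("https://karmen.example.org", "admin", "secret")
        if err != nil {
            log.Fatal(err)
        }

        teachers, err := client.GetTeachers()
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("fetched %d teachers from karmen", len(teachers))
    }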

+ 1 - 0
vendor/gogs.carducci-dante.gov.it/karmen/ldap/.gitignore

@@ -0,0 +1 @@
+**/*~

+ 370 - 0
vendor/gogs.carducci-dante.gov.it/karmen/ldap/ldap.go

@@ -0,0 +1,370 @@
+package ldap
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strconv"
+
+	"gogs.carducci-dante.gov.it/karmen/core/config"
+	"gogs.carducci-dante.gov.it/karmen/core/orm"
+	ldap "gopkg.in/ldap.v2"
+)
+
+type CompleteNameI interface {
+	CompleteName() string
+}
+
+type Client struct {
+	Conn   *ldap.Conn
+	Config *config.ConfigT
+}
+
+func (c *Client) Close() {
+	c.Conn.Close()
+}
+
+func NewClient(host string, config *config.ConfigT) (*Client, error) {
+	var err error
+
+	client := new(Client)
+	client.Config = config
+
+	client.Conn, err = ldap.Dial("tcp", host)
+	if err != nil {
+		return nil, err
+	}
+
+	err = client.Conn.Bind(config.AdminCN(), config.Ldap.AdminPassword)
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
+func (c *Client) AddTeacher(teacher *orm.Teacher) error {
+	mailDir := fmt.Sprintf("%s/%s/", c.Config.Ldap.MailDirBasePath, teacher.Username())
+	uidNumber, err := c.NextMailUIDNumber()
+	if err != nil {
+		return err
+	}
+	addRequest := ldap.NewAddRequest(c.TeacherDN(teacher))
+	addRequest.Attribute("objectClass", []string{
+		"inetOrgPerson",
+		"posixAccount",
+		"PostfixBookMailAccount",
+		"organizationalPerson",
+		"extensibleObject",
+	})
+	addRequest.Attribute("sn", []string{teacher.Surname})
+	addRequest.Attribute("uid", []string{teacher.Username()})
+	addRequest.Attribute("homeDirectory", []string{fmt.Sprintf("/home/users/%s", teacher.Username())})
+	addRequest.Attribute("givenName", []string{teacher.Surname})
+	addRequest.Attribute("mail", []string{fmt.Sprintf("%s@%s", teacher.Username(), c.Config.Domain)})
+	addRequest.Attribute("mailEnabled", []string{"TRUE"})
+	addRequest.Attribute("mailGidNumber", []string{c.Config.Ldap.MailGIDNumber})
+	addRequest.Attribute("mailUidNumber", []string{uidNumber})
+	addRequest.Attribute("uidNumber", []string{uidNumber})
+	addRequest.Attribute("gidNumber", []string{uidNumber})
+	addRequest.Attribute("uniqueIdentifier", []string{teacher.Username()})
+	addRequest.Attribute("mailHomeDirectory", []string{mailDir})
+	addRequest.Attribute("mailStorageDirectory", []string{"maildir:" + mailDir})
+	addRequest.Attribute("mailQuota", []string{c.Config.Ldap.MailQuota})
+	addRequest.Attribute("userPassword", []string{fmt.Sprintf("{SSHA}%s", teacher.SaltPassword(teacher.PlainPassword))})
+
+	err = c.Conn.Add(addRequest)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *Client) UpdateTeacher(teacher *orm.Teacher) error {
+	mailDir := fmt.Sprintf("%s/%s/", c.Config.Ldap.MailDirBasePath, teacher.Username())
+	modRequest := ldap.NewModifyRequest(c.TeacherDN(teacher))
+	modRequest.Replace("mail", []string{fmt.Sprintf("%s@%s", teacher.Username(), c.Config.Domain)})
+	modRequest.Replace("mailHomeDirectory", []string{mailDir})
+	modRequest.Replace("mailStorageDirectory", []string{"maildir:" + mailDir})
+	modRequest.Replace("mailEnabled", []string{"TRUE"})
+	modRequest.Replace("mailGidNumber", []string{c.Config.Ldap.MailGIDNumber})
+	modRequest.Replace("mailQuota", []string{c.Config.Ldap.MailQuota})
+
+	err := c.Conn.Modify(modRequest)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *Client) UpdateTeacherPassword(teacher *orm.Teacher, password string) error {
+	modRequest := ldap.NewModifyRequest(c.TeacherDN(teacher))
+	modRequest.Replace("userPassword", []string{fmt.Sprintf("{SSHA}%s", teacher.SaltPassword(password))})
+	err := c.Conn.Modify(modRequest)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *Client) DeleteTeacher(teacher *orm.Teacher) error {
+	delRequest := ldap.NewDelRequest(c.TeacherDN(teacher), nil)
+	err := c.Conn.Del(delRequest)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *Client) DeleteByDN(dn string) error {
+	delRequest := ldap.NewDelRequest(dn, nil)
+	err := c.Conn.Del(delRequest)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *Client) AddTeacherToGroup(teacher *orm.Teacher, groupDN string) error {
+	memberAttr, err := c.memberAttribute(groupDN)
+	if err != nil {
+		return err
+	}
+	modRequest := ldap.NewModifyRequest(fmt.Sprintf("%s,%s", groupDN, c.GroupsDN()))
+	switch memberAttr {
+	case "member":
+		modRequest.Add(memberAttr, []string{c.personDN(teacher, c.TeachersDN())})
+	case "memberuid":
+		modRequest.Add(memberAttr, []string{teacher.Username()})
+	default:
+		return errors.New(fmt.Sprintf("Attribute %s is not supported!", memberAttr))
+	}
+	err = c.Conn.Modify(modRequest)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *Client) RemoveTeacherFromGroup(teacher *orm.Teacher, groupDN string) error {
+	memberAttr, err := c.memberAttribute(groupDN)
+	if err != nil {
+		return err
+	}
+	modRequest := ldap.NewModifyRequest(fmt.Sprintf("%s,%s", groupDN, c.GroupsDN()))
+	switch memberAttr {
+	case "member":
+		modRequest.Delete(memberAttr, []string{c.personDN(teacher, c.TeachersDN())})
+	case "memberuid":
+		modRequest.Delete(memberAttr, []string{teacher.Username()})
+	default:
+		return errors.New(fmt.Sprintf("Attribute %s is not supported!", memberAttr))
+	}
+	err = c.Conn.Modify(modRequest)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *Client) RemoveTeacherFromGroupByMemberValue(memberValue string, groupDN string) error {
+	memberAttr, err := c.memberAttribute(groupDN)
+	if err != nil {
+		return err
+	}
+	modRequest := ldap.NewModifyRequest(fmt.Sprintf("%s,%s", groupDN, c.GroupsDN()))
+	switch memberAttr {
+	case "member":
+		modRequest.Delete(memberAttr, []string{memberValue})
+	case "memberuid":
+		modRequest.Delete(memberAttr, []string{memberValue})
+	default:
+		return errors.New(fmt.Sprintf("Attribute %s is not supported!", memberAttr))
+	}
+	err = c.Conn.Modify(modRequest)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *Client) IsTeacherInGroup(teacher *orm.Teacher, groupDN string) (bool, error) {
+	memberAttr, err := c.memberAttribute(groupDN)
+	if err != nil {
+		return false, err
+	}
+	var memberValue string
+	switch memberAttr {
+	case "member":
+		memberValue = c.personDN(teacher, c.TeachersDN())
+	case "memberuid":
+		memberValue = teacher.Username()
+	default:
+		return false, errors.New(fmt.Sprintf("Attribute %s is not supported!", memberAttr))
+	}
+
+	searchRequest := ldap.NewSearchRequest(
+		fmt.Sprintf("%s,%s", groupDN, c.GroupsDN()),
+		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
+		fmt.Sprintf("(&(%s=%s))", memberAttr, memberValue),
+		[]string{memberAttr},
+		nil,
+	)
+
+	sr, err := c.Conn.Search(searchRequest)
+	if err != nil {
+		return false, err
+	}
+
+	return len(sr.Entries) > 0, nil
+}
+
+func (c *Client) GroupMembers(groupDN string) ([]*ldap.Entry, error) {
+	searchRequest := ldap.NewSearchRequest(
+		fmt.Sprintf("%s,%s", groupDN, c.GroupsDN()),
+		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
+		"(|(member=*)(memberuid=*))",
+		[]string{"member", "memberuid"},
+		nil,
+	)
+	sr, err := c.Conn.Search(searchRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	return sr.Entries, nil
+}
+
+func (c *Client) Teachers() ([]*ldap.Entry, error) {
+	result, err := c.Search(
+		c.TeachersDN(),
+		"(&(objectClass=organizationalPerson))",
+		[]string{"dn", "cn"},
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return result.Entries, nil
+}
+
+func (c *Client) Search(base string, filter string, attributes []string) (*ldap.SearchResult, error) {
+	searchRequest := ldap.NewSearchRequest(
+		base,
+		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
+		filter,
+		attributes,
+		nil,
+	)
+
+	sr, err := c.Conn.Search(searchRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	return sr, nil
+}
+
+func (c *Client) TeacherExists(teacher *orm.Teacher) (bool, error) {
+	searchRequest := ldap.NewSearchRequest(
+		c.TeachersDN(),
+		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
+		fmt.Sprintf("(&(objectClass=organizationalPerson)(cn=%s))", teacher.CompleteName()),
+
+		[]string{"dn", "cn"},
+		nil,
+	)
+
+	sr, err := c.Conn.Search(searchRequest)
+	if err != nil {
+		return false, err
+	}
+
+	return len(sr.Entries) > 0, nil
+}
+
+func (c *Client) TeachersDN() string {
+	return fmt.Sprintf("%s,%s", c.Config.Ldap.TeachersDN, c.DomainDN())
+}
+
+func (c *Client) DomainDN() string {
+	return c.Config.DomainDN()
+}
+
+func (c *Client) TeacherDN(teacher *orm.Teacher) string {
+	return fmt.Sprintf("cn=%s %s,%s", teacher.Name, teacher.Surname, c.TeachersDN())
+}
+
+func (c *Client) GroupsDN() string {
+	return fmt.Sprintf("%s,%s", c.Config.Ldap.GroupsDN, c.DomainDN())
+}
+
+func (c *Client) PeopleDN() string {
+	return fmt.Sprintf("%s,%s", c.Config.Ldap.PeopleDN, c.DomainDN())
+}
+
+func (c *Client) NextMailUIDNumber() (string, error) {
+	searchRequest := ldap.NewSearchRequest(
+		c.PeopleDN(),
+		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
+		"(mailUidNumber=*)",
+		[]string{"mailUidNumber"},
+		nil,
+	)
+	sr, err := c.Conn.Search(searchRequest)
+	if err != nil {
+		return "0", err
+	}
+
+	if len(sr.Entries) == 0 {
+		return c.Config.Ldap.FirstUIDNumber, nil
+	}
+
+	var uids []int
+
+	for _, e := range sr.Entries {
+		n, err := strconv.Atoi(e.Attributes[0].Values[0])
+		if err != nil {
+			return "0", err
+		}
+		uids = append(uids, n)
+	}
+
+	sort.Ints(uids)
+
+	nextUid := uids[len(uids)-1] + 1
+
+	return strconv.Itoa(nextUid), nil
+}
+
+func (c *Client) memberAttribute(groupDN string) (string, error) {
+	searchRequest := ldap.NewSearchRequest(
+		fmt.Sprintf("%s,%s", groupDN, c.GroupsDN()),
+		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
+		"(member=*)",
+		[]string{"member"},
+		nil,
+	)
+
+	sr, err := c.Conn.Search(searchRequest)
+	if err != nil {
+		return "", err
+	}
+
+	if len(sr.Entries) > 0 {
+		return "member", nil
+	}
+
+	return "memberuid", nil
+}
+
+func (c *Client) personDN(person CompleteNameI, baseDN string) string {
+	dn := fmt.Sprintf("cn=%s,%s", person.CompleteName(), baseDN)
+	return dn
+}
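
A corresponding sketch for the LDAP side, using only functions defined above; the configuration value is a placeholder (in karmen it comes from the loaded application config) and the host is fictitious:

    package main

    import (
        "log"

        "gogs.carducci-dante.gov.it/karmen/core/config"
        ldap "gogs.carducci-dante.gov.it/karmen/ldap"
    )

    func main() {
        // Placeholder: in karmen this is the loaded application configuration.
        cfg := &config.ConfigT{}

        // NewClient dials the directory and binds as the configured admin DN.
        client, err := ldap.NewClient("ldap.example.org:389", cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        entries, err := client.Teachers()
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("found %d teacher entries in LDAP", len(entries))
    }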

+ 2 - 0
vendor/gogs.carducci-dante.gov.it/karmen/util/template/.gitignore

@@ -0,0 +1,2 @@
+*~
+

+ 29 - 0
vendor/gogs.carducci-dante.gov.it/karmen/util/template/html.go

@@ -0,0 +1,29 @@
+package template
+
+import (
+	"html/template"
+	"io/ioutil"
+)
+
+func LoadHTMLTemplate(filename string, funcMap ...template.FuncMap) (*template.Template, error) {
+	var (
+		tpl *template.Template
+		err error
+	)
+	content, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	if len(funcMap) > 0 {
+		tpl, err = template.New(filename).Funcs(funcMap[0]).Parse(string(content))
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		tpl, err = template.New(filename).Parse(string(content))
+		if err != nil {
+			return nil, err
+		}
+	}
+	return tpl, nil
+}

+ 29 - 0
vendor/gogs.carducci-dante.gov.it/karmen/util/template/template.go

@@ -0,0 +1,29 @@
+package template
+
+import (
+	"io/ioutil"
+	"text/template"
+)
+
+func LoadTextTemplate(filename string, funcMap ...template.FuncMap) (*template.Template, error) {
+	var (
+		tpl *template.Template
+		err error
+	)
+	content, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	if len(funcMap) > 0 {
+		tpl, err = template.New(filename).Funcs(funcMap[0]).Parse(string(content))
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		tpl, err = template.New(filename).Parse(string(content))
+		if err != nil {
+			return nil, err
+		}
+	}
+	return tpl, nil
+}
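
LoadTextTemplate (and its html/template twin LoadHTMLTemplate above) simply reads a file and parses it, optionally wiring in a single FuncMap. A short usage sketch; the template path, data and function map are placeholders:

    package main

    import (
        "log"
        "os"
        "strings"
        "text/template"

        karmentpl "gogs.carducci-dante.gov.it/karmen/util/template"
    )

    func main() {
        // The optional FuncMap is passed straight through to template.New().Funcs().
        funcs := template.FuncMap{"upper": strings.ToUpper}

        tpl, err := karmentpl.LoadTextTemplate("mail.tmpl", funcs)
        if err != nil {
            log.Fatal(err)
        }
        if err := tpl.Execute(os.Stdout, map[string]string{"Name": "Rossi"}); err != nil {
            log.Fatal(err)
        }
    }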

+ 18 - 0
vendor/gopkg.in/asn1-ber.v1/.travis.yml

@@ -0,0 +1,18 @@
+language: go
+go:
+    - 1.2
+    - 1.3
+    - 1.4
+    - 1.5
+    - 1.6
+    - 1.7
+    - 1.8
+    - tip
+go_import_path: gopkg.in/asn1-ber.v1
+install:
+    - go list -f '{{range .Imports}}{{.}} {{end}}' ./... | xargs go get -v
+    - go list -f '{{range .TestImports}}{{.}} {{end}}' ./... | xargs go get -v
+    - go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover
+    - go build -v ./...
+script:
+    - go test -v -cover ./...

+ 22 - 0
vendor/gopkg.in/asn1-ber.v1/LICENSE

@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
+Portions copyright (c) 2015-2016 go-asn1-ber Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 24 - 0
vendor/gopkg.in/asn1-ber.v1/README.md

@@ -0,0 +1,24 @@
+[![GoDoc](https://godoc.org/gopkg.in/asn1-ber.v1?status.svg)](https://godoc.org/gopkg.in/asn1-ber.v1) [![Build Status](https://travis-ci.org/go-asn1-ber/asn1-ber.svg)](https://travis-ci.org/go-asn1-ber/asn1-ber)
+
+
+ASN1 BER Encoding / Decoding Library for the GO programming language.
+---------------------------------------------------------------------
+
+Required libraries: 
+   None
+
+Working:
+   Very basic encoding / decoding needed for LDAP protocol
+
+Tests Implemented:
+   A few
+
+TODO:
+   Fix all encoding / decoding to conform to ASN1 BER spec
+   Implement Tests / Benchmarks
+
+---
+
+The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
+The design is licensed under the Creative Commons 3.0 Attributions license.
+Read this article for more details: http://blog.golang.org/gopher

+ 504 - 0
vendor/gopkg.in/asn1-ber.v1/ber.go

@@ -0,0 +1,504 @@
+package ber
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+)
+
+type Packet struct {
+	Identifier
+	Value       interface{}
+	ByteValue   []byte
+	Data        *bytes.Buffer
+	Children    []*Packet
+	Description string
+}
+
+type Identifier struct {
+	ClassType Class
+	TagType   Type
+	Tag       Tag
+}
+
+type Tag uint64
+
+const (
+	TagEOC              Tag = 0x00
+	TagBoolean          Tag = 0x01
+	TagInteger          Tag = 0x02
+	TagBitString        Tag = 0x03
+	TagOctetString      Tag = 0x04
+	TagNULL             Tag = 0x05
+	TagObjectIdentifier Tag = 0x06
+	TagObjectDescriptor Tag = 0x07
+	TagExternal         Tag = 0x08
+	TagRealFloat        Tag = 0x09
+	TagEnumerated       Tag = 0x0a
+	TagEmbeddedPDV      Tag = 0x0b
+	TagUTF8String       Tag = 0x0c
+	TagRelativeOID      Tag = 0x0d
+	TagSequence         Tag = 0x10
+	TagSet              Tag = 0x11
+	TagNumericString    Tag = 0x12
+	TagPrintableString  Tag = 0x13
+	TagT61String        Tag = 0x14
+	TagVideotexString   Tag = 0x15
+	TagIA5String        Tag = 0x16
+	TagUTCTime          Tag = 0x17
+	TagGeneralizedTime  Tag = 0x18
+	TagGraphicString    Tag = 0x19
+	TagVisibleString    Tag = 0x1a
+	TagGeneralString    Tag = 0x1b
+	TagUniversalString  Tag = 0x1c
+	TagCharacterString  Tag = 0x1d
+	TagBMPString        Tag = 0x1e
+	TagBitmask          Tag = 0x1f // xxx11111b
+
+	// HighTag indicates the start of a high-tag byte sequence
+	HighTag Tag = 0x1f // xxx11111b
+	// HighTagContinueBitmask indicates the high-tag byte sequence should continue
+	HighTagContinueBitmask Tag = 0x80 // 10000000b
+	// HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte
+	HighTagValueBitmask Tag = 0x7f // 01111111b
+)
+
+const (
+	// LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used
+	LengthLongFormBitmask = 0x80
+	// LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence
+	LengthValueBitmask = 0x7f
+
+	// LengthIndefinite is returned from readLength to indicate an indefinite length
+	LengthIndefinite = -1
+)
+
+var tagMap = map[Tag]string{
+	TagEOC:              "EOC (End-of-Content)",
+	TagBoolean:          "Boolean",
+	TagInteger:          "Integer",
+	TagBitString:        "Bit String",
+	TagOctetString:      "Octet String",
+	TagNULL:             "NULL",
+	TagObjectIdentifier: "Object Identifier",
+	TagObjectDescriptor: "Object Descriptor",
+	TagExternal:         "External",
+	TagRealFloat:        "Real (float)",
+	TagEnumerated:       "Enumerated",
+	TagEmbeddedPDV:      "Embedded PDV",
+	TagUTF8String:       "UTF8 String",
+	TagRelativeOID:      "Relative-OID",
+	TagSequence:         "Sequence and Sequence of",
+	TagSet:              "Set and Set OF",
+	TagNumericString:    "Numeric String",
+	TagPrintableString:  "Printable String",
+	TagT61String:        "T61 String",
+	TagVideotexString:   "Videotex String",
+	TagIA5String:        "IA5 String",
+	TagUTCTime:          "UTC Time",
+	TagGeneralizedTime:  "Generalized Time",
+	TagGraphicString:    "Graphic String",
+	TagVisibleString:    "Visible String",
+	TagGeneralString:    "General String",
+	TagUniversalString:  "Universal String",
+	TagCharacterString:  "Character String",
+	TagBMPString:        "BMP String",
+}
+
+type Class uint8
+
+const (
+	ClassUniversal   Class = 0   // 00xxxxxxb
+	ClassApplication Class = 64  // 01xxxxxxb
+	ClassContext     Class = 128 // 10xxxxxxb
+	ClassPrivate     Class = 192 // 11xxxxxxb
+	ClassBitmask     Class = 192 // 11xxxxxxb
+)
+
+var ClassMap = map[Class]string{
+	ClassUniversal:   "Universal",
+	ClassApplication: "Application",
+	ClassContext:     "Context",
+	ClassPrivate:     "Private",
+}
+
+type Type uint8
+
+const (
+	TypePrimitive   Type = 0  // xx0xxxxxb
+	TypeConstructed Type = 32 // xx1xxxxxb
+	TypeBitmask     Type = 32 // xx1xxxxxb
+)
+
+var TypeMap = map[Type]string{
+	TypePrimitive:   "Primitive",
+	TypeConstructed: "Constructed",
+}
+
+var Debug bool = false
+
+func PrintBytes(out io.Writer, buf []byte, indent string) {
+	data_lines := make([]string, (len(buf)/30)+1)
+	num_lines := make([]string, (len(buf)/30)+1)
+
+	for i, b := range buf {
+		data_lines[i/30] += fmt.Sprintf("%02x ", b)
+		num_lines[i/30] += fmt.Sprintf("%02d ", (i+1)%100)
+	}
+
+	for i := 0; i < len(data_lines); i++ {
+		out.Write([]byte(indent + data_lines[i] + "\n"))
+		out.Write([]byte(indent + num_lines[i] + "\n\n"))
+	}
+}
+
+func PrintPacket(p *Packet) {
+	printPacket(os.Stdout, p, 0, false)
+}
+
+func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) {
+	indent_str := ""
+
+	for len(indent_str) != indent {
+		indent_str += " "
+	}
+
+	class_str := ClassMap[p.ClassType]
+
+	tagtype_str := TypeMap[p.TagType]
+
+	tag_str := fmt.Sprintf("0x%02X", p.Tag)
+
+	if p.ClassType == ClassUniversal {
+		tag_str = tagMap[p.Tag]
+	}
+
+	value := fmt.Sprint(p.Value)
+	description := ""
+
+	if p.Description != "" {
+		description = p.Description + ": "
+	}
+
+	fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indent_str, description, class_str, tagtype_str, tag_str, p.Data.Len(), value)
+
+	if printBytes {
+		PrintBytes(out, p.Bytes(), indent_str)
+	}
+
+	for _, child := range p.Children {
+		printPacket(out, child, indent+1, printBytes)
+	}
+}
+
+// ReadPacket reads a single Packet from the reader
+func ReadPacket(reader io.Reader) (*Packet, error) {
+	p, _, err := readPacket(reader)
+	if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+func DecodeString(data []byte) string {
+	return string(data)
+}
+
+func parseInt64(bytes []byte) (ret int64, err error) {
+	if len(bytes) > 8 {
+		// We'll overflow an int64 in this case.
+		err = fmt.Errorf("integer too large")
+		return
+	}
+	for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+		ret <<= 8
+		ret |= int64(bytes[bytesRead])
+	}
+
+	// Shift up and down in order to sign extend the result.
+	ret <<= 64 - uint8(len(bytes))*8
+	ret >>= 64 - uint8(len(bytes))*8
+	return
+}
+
+func encodeInteger(i int64) []byte {
+	n := int64Length(i)
+	out := make([]byte, n)
+
+	var j int
+	for ; n > 0; n-- {
+		out[j] = (byte(i >> uint((n-1)*8)))
+		j++
+	}
+
+	return out
+}
+
+func int64Length(i int64) (numBytes int) {
+	numBytes = 1
+
+	for i > 127 {
+		numBytes++
+		i >>= 8
+	}
+
+	for i < -128 {
+		numBytes++
+		i >>= 8
+	}
+
+	return
+}
+
+// DecodePacket decodes the given bytes into a single Packet
+// If a decode error is encountered, nil is returned.
+func DecodePacket(data []byte) *Packet {
+	p, _, _ := readPacket(bytes.NewBuffer(data))
+
+	return p
+}
+
+// DecodePacketErr decodes the given bytes into a single Packet
+// If a decode error is encountered, nil is returned
+func DecodePacketErr(data []byte) (*Packet, error) {
+	p, _, err := readPacket(bytes.NewBuffer(data))
+	if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// readPacket reads a single Packet from the reader, returning the number of bytes read
+func readPacket(reader io.Reader) (*Packet, int, error) {
+	identifier, length, read, err := readHeader(reader)
+	if err != nil {
+		return nil, read, err
+	}
+
+	p := &Packet{
+		Identifier: identifier,
+	}
+
+	p.Data = new(bytes.Buffer)
+	p.Children = make([]*Packet, 0, 2)
+	p.Value = nil
+
+	if p.TagType == TypeConstructed {
+		// TODO: if universal, ensure tag type is allowed to be constructed
+
+		// Track how much content we've read
+		contentRead := 0
+		for {
+			if length != LengthIndefinite {
+				// End if we've read what we've been told to
+				if contentRead == length {
+					break
+				}
+				// Detect if a packet boundary didn't fall on the expected length
+				if contentRead > length {
+					return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead)
+				}
+			}
+
+			// Read the next packet
+			child, r, err := readPacket(reader)
+			if err != nil {
+				return nil, read, err
+			}
+			contentRead += r
+			read += r
+
+			// Test if this is the EOC marker for our packet
+			if isEOCPacket(child) {
+				if length == LengthIndefinite {
+					break
+				}
+				return nil, read, errors.New("eoc child not allowed with definite length")
+			}
+
+			// Append and continue
+			p.AppendChild(child)
+		}
+		return p, read, nil
+	}
+
+	if length == LengthIndefinite {
+		return nil, read, errors.New("indefinite length used with primitive type")
+	}
+
+	// Read definite-length content
+	content := make([]byte, length, length)
+	if length > 0 {
+		_, err := io.ReadFull(reader, content)
+		if err != nil {
+			if err == io.EOF {
+				return nil, read, io.ErrUnexpectedEOF
+			}
+			return nil, read, err
+		}
+		read += length
+	}
+
+	if p.ClassType == ClassUniversal {
+		p.Data.Write(content)
+		p.ByteValue = content
+
+		switch p.Tag {
+		case TagEOC:
+		case TagBoolean:
+			val, _ := parseInt64(content)
+
+			p.Value = val != 0
+		case TagInteger:
+			p.Value, _ = parseInt64(content)
+		case TagBitString:
+		case TagOctetString:
+			// The actual string encoding is not known here
+			// (e.g. for LDAP the content is already a UTF-8 encoded
+			// string), so return the data without further processing.
+			p.Value = DecodeString(content)
+		case TagNULL:
+		case TagObjectIdentifier:
+		case TagObjectDescriptor:
+		case TagExternal:
+		case TagRealFloat:
+		case TagEnumerated:
+			p.Value, _ = parseInt64(content)
+		case TagEmbeddedPDV:
+		case TagUTF8String:
+			p.Value = DecodeString(content)
+		case TagRelativeOID:
+		case TagSequence:
+		case TagSet:
+		case TagNumericString:
+		case TagPrintableString:
+			p.Value = DecodeString(content)
+		case TagT61String:
+		case TagVideotexString:
+		case TagIA5String:
+		case TagUTCTime:
+		case TagGeneralizedTime:
+		case TagGraphicString:
+		case TagVisibleString:
+		case TagGeneralString:
+		case TagUniversalString:
+		case TagCharacterString:
+		case TagBMPString:
+		}
+	} else {
+		p.Data.Write(content)
+	}
+
+	return p, read, nil
+}
+
+func (p *Packet) Bytes() []byte {
+	var out bytes.Buffer
+
+	out.Write(encodeIdentifier(p.Identifier))
+	out.Write(encodeLength(p.Data.Len()))
+	out.Write(p.Data.Bytes())
+
+	return out.Bytes()
+}
+
+func (p *Packet) AppendChild(child *Packet) {
+	p.Data.Write(child.Bytes())
+	p.Children = append(p.Children, child)
+}
+
+func Encode(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
+	p := new(Packet)
+
+	p.ClassType = ClassType
+	p.TagType = TagType
+	p.Tag = Tag
+	p.Data = new(bytes.Buffer)
+
+	p.Children = make([]*Packet, 0, 2)
+
+	p.Value = Value
+	p.Description = Description
+
+	if Value != nil {
+		v := reflect.ValueOf(Value)
+
+		if ClassType == ClassUniversal {
+			switch Tag {
+			case TagOctetString:
+				sv, ok := v.Interface().(string)
+
+				if ok {
+					p.Data.Write([]byte(sv))
+				}
+			}
+		}
+	}
+
+	return p
+}
+
+func NewSequence(Description string) *Packet {
+	return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, Description)
+}
+
+func NewBoolean(ClassType Class, TagType Type, Tag Tag, Value bool, Description string) *Packet {
+	intValue := int64(0)
+
+	if Value {
+		intValue = 1
+	}
+
+	p := Encode(ClassType, TagType, Tag, nil, Description)
+
+	p.Value = Value
+	p.Data.Write(encodeInteger(intValue))
+
+	return p
+}
+
+func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
+	p := Encode(ClassType, TagType, Tag, nil, Description)
+
+	p.Value = Value
+	switch v := Value.(type) {
+	case int:
+		p.Data.Write(encodeInteger(int64(v)))
+	case uint:
+		p.Data.Write(encodeInteger(int64(v)))
+	case int64:
+		p.Data.Write(encodeInteger(v))
+	case uint64:
+		// TODO : check range or add encodeUInt...
+		p.Data.Write(encodeInteger(int64(v)))
+	case int32:
+		p.Data.Write(encodeInteger(int64(v)))
+	case uint32:
+		p.Data.Write(encodeInteger(int64(v)))
+	case int16:
+		p.Data.Write(encodeInteger(int64(v)))
+	case uint16:
+		p.Data.Write(encodeInteger(int64(v)))
+	case int8:
+		p.Data.Write(encodeInteger(int64(v)))
+	case uint8:
+		p.Data.Write(encodeInteger(int64(v)))
+	default:
+		// TODO : add support for big.Int ?
+		panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v))
+	}
+
+	return p
+}
+
+func NewString(ClassType Class, TagType Type, Tag Tag, Value, Description string) *Packet {
+	p := Encode(ClassType, TagType, Tag, nil, Description)
+
+	p.Value = Value
+	p.Data.Write([]byte(Value))
+
+	return p
+}
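
The exported surface used by the LDAP packages boils down to building packets with Encode/New* plus AppendChild, and reading them back with ReadPacket/DecodePacket. A minimal round-trip sketch using only functions defined in this file:

    package main

    import (
        ber "gopkg.in/asn1-ber.v1"
    )

    func main() {
        // A SEQUENCE holding a single OCTET STRING, the basic shape LDAP messages are built from.
        seq := ber.NewSequence("example sequence")
        seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hello", "greeting"))

        // Serialize, then decode the raw bytes back into a Packet tree.
        decoded := ber.DecodePacket(seq.Bytes())
        ber.PrintPacket(decoded)
    }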

+ 25 - 0
vendor/gopkg.in/asn1-ber.v1/content_int.go

@@ -0,0 +1,25 @@
+package ber
+
+func encodeUnsignedInteger(i uint64) []byte {
+	n := uint64Length(i)
+	out := make([]byte, n)
+
+	var j int
+	for ; n > 0; n-- {
+		out[j] = (byte(i >> uint((n-1)*8)))
+		j++
+	}
+
+	return out
+}
+
+func uint64Length(i uint64) (numBytes int) {
+	numBytes = 1
+
+	for i > 255 {
+		numBytes++
+		i >>= 8
+	}
+
+	return
+}

+ 29 - 0
vendor/gopkg.in/asn1-ber.v1/header.go

@@ -0,0 +1,29 @@
+package ber
+
+import (
+	"errors"
+	"io"
+)
+
+func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) {
+	if i, c, err := readIdentifier(reader); err != nil {
+		return Identifier{}, 0, read, err
+	} else {
+		identifier = i
+		read += c
+	}
+
+	if l, c, err := readLength(reader); err != nil {
+		return Identifier{}, 0, read, err
+	} else {
+		length = l
+		read += c
+	}
+
+	// Validate length type with identifier (x.600, 8.1.3.2.a)
+	if length == LengthIndefinite && identifier.TagType == TypePrimitive {
+		return Identifier{}, 0, read, errors.New("indefinite length used with primitive type")
+	}
+
+	return identifier, length, read, nil
+}

+ 103 - 0
vendor/gopkg.in/asn1-ber.v1/identifier.go

@@ -0,0 +1,103 @@
+package ber
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+)
+
+func readIdentifier(reader io.Reader) (Identifier, int, error) {
+	identifier := Identifier{}
+	read := 0
+
+	// identifier byte
+	b, err := readByte(reader)
+	if err != nil {
+		if Debug {
+			fmt.Printf("error reading identifier byte: %v\n", err)
+		}
+		return Identifier{}, read, err
+	}
+	read++
+
+	identifier.ClassType = Class(b) & ClassBitmask
+	identifier.TagType = Type(b) & TypeBitmask
+
+	if tag := Tag(b) & TagBitmask; tag != HighTag {
+		// short-form tag
+		identifier.Tag = tag
+		return identifier, read, nil
+	}
+
+	// high-tag-number tag
+	tagBytes := 0
+	for {
+		b, err := readByte(reader)
+		if err != nil {
+			if Debug {
+				fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err)
+			}
+			return Identifier{}, read, err
+		}
+		tagBytes++
+		read++
+
+		// Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b)
+		identifier.Tag <<= 7
+		identifier.Tag |= Tag(b) & HighTagValueBitmask
+
+		// First byte may not be all zeros (x.690, 8.1.2.4.2.c)
+		if tagBytes == 1 && identifier.Tag == 0 {
+			return Identifier{}, read, errors.New("invalid first high-tag-number tag byte")
+		}
+		// Overflow of int64
+		// TODO: support big int tags?
+		if tagBytes > 9 {
+			return Identifier{}, read, errors.New("high-tag-number tag overflow")
+		}
+
+		// Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a)
+		if Tag(b)&HighTagContinueBitmask == 0 {
+			break
+		}
+	}
+
+	return identifier, read, nil
+}
+
+func encodeIdentifier(identifier Identifier) []byte {
+	b := []byte{0x0}
+	b[0] |= byte(identifier.ClassType)
+	b[0] |= byte(identifier.TagType)
+
+	if identifier.Tag < HighTag {
+		// Short-form
+		b[0] |= byte(identifier.Tag)
+	} else {
+		// high-tag-number
+		b[0] |= byte(HighTag)
+
+		tag := identifier.Tag
+
+		highBit := uint(63)
+		for {
+			if tag&(1<<highBit) != 0 {
+				break
+			}
+			highBit--
+		}
+
+		tagBytes := int(math.Ceil(float64(highBit) / 7.0))
+		for i := tagBytes - 1; i >= 0; i-- {
+			offset := uint(i) * 7
+			mask := Tag(0x7f) << offset
+			tagByte := (tag & mask) >> offset
+			if i != 0 {
+				tagByte |= 0x80
+			}
+			b = append(b, byte(tagByte))
+		}
+	}
+	return b
+}

+ 81 - 0
vendor/gopkg.in/asn1-ber.v1/length.go

@@ -0,0 +1,81 @@
+package ber
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+func readLength(reader io.Reader) (length int, read int, err error) {
+	// length byte
+	b, err := readByte(reader)
+	if err != nil {
+		if Debug {
+			fmt.Printf("error reading length byte: %v\n", err)
+		}
+		return 0, 0, err
+	}
+	read++
+
+	switch {
+	case b == 0xFF:
+		// Invalid 0xFF (x.600, 8.1.3.5.c)
+		return 0, read, errors.New("invalid length byte 0xff")
+
+	case b == LengthLongFormBitmask:
+		// Indefinite form, we have to decode packets until we encounter an EOC packet (x.600, 8.1.3.6)
+		length = LengthIndefinite
+
+	case b&LengthLongFormBitmask == 0:
+		// Short definite form, extract the length from the bottom 7 bits (x.600, 8.1.3.4)
+		length = int(b) & LengthValueBitmask
+
+	case b&LengthLongFormBitmask != 0:
+		// Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.600, 8.1.3.5.b)
+		lengthBytes := int(b) & LengthValueBitmask
+		// Protect against overflow
+		// TODO: support big int length?
+		if lengthBytes > 8 {
+			return 0, read, errors.New("long-form length overflow")
+		}
+
+		// Accumulate into a 64-bit variable
+		var length64 int64
+		for i := 0; i < lengthBytes; i++ {
+			b, err = readByte(reader)
+			if err != nil {
+				if Debug {
+					fmt.Printf("error reading long-form length byte %d: %v\n", i, err)
+				}
+				return 0, read, err
+			}
+			read++
+
+			// x.600, 8.1.3.5
+			length64 <<= 8
+			length64 |= int64(b)
+		}
+
+		// Cast to a platform-specific integer
+		length = int(length64)
+		// Ensure we didn't overflow
+		if int64(length) != length64 {
+			return 0, read, errors.New("long-form length overflow")
+		}
+
+	default:
+		return 0, read, errors.New("invalid length byte")
+	}
+
+	return length, read, nil
+}
+
+func encodeLength(length int) []byte {
+	length_bytes := encodeUnsignedInteger(uint64(length))
+	if length > 127 || len(length_bytes) > 1 {
+		longFormBytes := []byte{(LengthLongFormBitmask | byte(len(length_bytes)))}
+		longFormBytes = append(longFormBytes, length_bytes...)
+		length_bytes = longFormBytes
+	}
+	return length_bytes
+}
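
readLength and encodeLength implement the BER short and long definite length forms described in the comments above. A self-contained worked example of the same rule; encodeBERLength is an illustrative re-implementation of the unexported encodeLength, not the vendored code:

    package main

    import "fmt"

    // encodeBERLength mirrors encodeLength: short form for lengths up to 127,
    // otherwise a long form whose first byte is 0x80 | <number of length bytes>.
    func encodeBERLength(length uint64) []byte {
        // Big-endian content bytes of the length, at least one byte.
        var content []byte
        for v := length; ; v >>= 8 {
            content = append([]byte{byte(v)}, content...)
            if v < 256 {
                break
            }
        }
        if length <= 127 {
            return content // short form: a single byte with the top bit clear
        }
        return append([]byte{0x80 | byte(len(content))}, content...)
    }

    func main() {
        fmt.Printf("% x\n", encodeBERLength(5))   // 05
        fmt.Printf("% x\n", encodeBERLength(127)) // 7f
        fmt.Printf("% x\n", encodeBERLength(200)) // 81 c8
        fmt.Printf("% x\n", encodeBERLength(500)) // 82 01 f4
    }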

+ 24 - 0
vendor/gopkg.in/asn1-ber.v1/util.go

@@ -0,0 +1,24 @@
+package ber
+
+import "io"
+
+func readByte(reader io.Reader) (byte, error) {
+	bytes := make([]byte, 1, 1)
+	_, err := io.ReadFull(reader, bytes)
+	if err != nil {
+		if err == io.EOF {
+			return 0, io.ErrUnexpectedEOF
+		}
+		return 0, err
+	}
+	return bytes[0], nil
+}
+
+func isEOCPacket(p *Packet) bool {
+	return p != nil &&
+		p.Tag == TagEOC &&
+		p.ClassType == ClassUniversal &&
+		p.TagType == TypePrimitive &&
+		len(p.ByteValue) == 0 &&
+		len(p.Children) == 0
+}

+ 9 - 0
vendor/gopkg.in/gomail.v2/.travis.yml

@@ -0,0 +1,9 @@
+language: go
+
+go:
+  - 1.2
+  - 1.3
+  - 1.4
+  - 1.5
+  - 1.6
+  - tip

+ 20 - 0
vendor/gopkg.in/gomail.v2/CHANGELOG.md

@@ -0,0 +1,20 @@
+# Change Log
+All notable changes to this project will be documented in this file.
+This project adheres to [Semantic Versioning](http://semver.org/).
+
+## [2.0.0] - 2015-09-02
+
+- Mailer has been removed. It has been replaced by Dialer and Sender.
+- `File` type and the `CreateFile` and `OpenFile` functions have been removed.
+- `Message.Attach` and `Message.Embed` have a new signature.
+- `Message.GetBodyWriter` has been removed. Use `Message.AddAlternativeWriter`
+instead.
+- `Message.Export` has been removed. `Message.WriteTo` can be used instead.
+- `Message.DelHeader` has been removed.
+- The `Bcc` header field is no longer sent. It is far simpler and more
+efficient: the same message is sent to all recipients instead of sending a
+different email to each Bcc address.
+- LoginAuth has been removed. `NewPlainDialer` now implements the LOGIN
+authentication mechanism when needed.
+- Go 1.2 is now required instead of Go 1.3. No external dependencies are used when
+using Go 1.5.

+ 20 - 0
vendor/gopkg.in/gomail.v2/CONTRIBUTING.md

@@ -0,0 +1,20 @@
+Thank you for contributing to Gomail! Here are a few guidelines:
+
+## Bugs
+
+If you think you found a bug, create an issue and supply the minimum amount
+of code triggering the bug so it can be reproduced.
+
+
+## Fixing a bug
+
+If you want to fix a bug, you can send a pull request. It should contain a
+new test or update an existing one to cover that bug.
+
+
+## New feature proposal
+
+If you think Gomail lacks a feature, you can open an issue or send a pull
+request. I want to keep Gomail code and API as simple as possible so please
+describe your needs so we can discuss whether this feature should be added to
+Gomail or not.

+ 20 - 0
vendor/gopkg.in/gomail.v2/LICENSE

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Alexandre Cesaro
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 92 - 0
vendor/gopkg.in/gomail.v2/README.md

@@ -0,0 +1,92 @@
+# Gomail
+[![Build Status](https://travis-ci.org/go-gomail/gomail.svg?branch=v2)](https://travis-ci.org/go-gomail/gomail) [![Code Coverage](http://gocover.io/_badge/gopkg.in/gomail.v2)](http://gocover.io/gopkg.in/gomail.v2) [![Documentation](https://godoc.org/gopkg.in/gomail.v2?status.svg)](https://godoc.org/gopkg.in/gomail.v2)
+
+## Introduction
+
+Gomail is a simple and efficient package to send emails. It is well tested and
+documented.
+
+Gomail can only send emails using an SMTP server. But the API is flexible and it
+is easy to implement other methods for sending emails using a local Postfix, an
+API, etc.
+
+It is versioned using [gopkg.in](https://gopkg.in) so I promise
+there will never be backward incompatible changes within each version.
+
+It requires Go 1.2 or newer. With Go 1.5, no external dependencies are used.
+
+
+## Features
+
+Gomail supports:
+- Attachments
+- Embedded images
+- HTML and text templates
+- Automatic encoding of special characters
+- SSL and TLS
+- Sending multiple emails with the same SMTP connection
+
+
+## Documentation
+
+https://godoc.org/gopkg.in/gomail.v2
+
+
+## Download
+
+    go get gopkg.in/gomail.v2
+
+
+## Examples
+
+See the [examples in the documentation](https://godoc.org/gopkg.in/gomail.v2#example-package).
+
+
+## FAQ
+
+### x509: certificate signed by unknown authority
+
+If you get this error it means the certificate used by the SMTP server is not
+considered valid by the client running Gomail. As a quick workaround you can
+bypass the verification of the server's certificate chain and host name by
+setting the dialer's `TLSConfig` field:
+
+    package main
+
+    import (
+    	"crypto/tls"
+
+    	"gopkg.in/gomail.v2"
+    )
+
+    func main() {
+    	d := gomail.NewDialer("smtp.example.com", 587, "user", "123456")
+    	d.TLSConfig = &tls.Config{InsecureSkipVerify: true}
+
+        // Send emails using d.
+    }
+
+Note, however, that this is insecure and should not be used in production.
+
+
+## Contribute
+
+Contributions are more than welcome! See [CONTRIBUTING.md](CONTRIBUTING.md) for
+more info.
+
+
+## Change log
+
+See [CHANGELOG.md](CHANGELOG.md).
+
+
+## License
+
+[MIT](LICENSE)
+
+
+## Contact
+
+You can ask questions on the [Gomail
+thread](https://groups.google.com/d/topic/golang-nuts/jMxZHzvvEVg/discussion)
+in the Go mailing-list.

+ 49 - 0
vendor/gopkg.in/gomail.v2/auth.go

@@ -0,0 +1,49 @@
+package gomail
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net/smtp"
+)
+
+// loginAuth is an smtp.Auth that implements the LOGIN authentication mechanism.
+type loginAuth struct {
+	username string
+	password string
+	host     string
+}
+
+func (a *loginAuth) Start(server *smtp.ServerInfo) (string, []byte, error) {
+	if !server.TLS {
+		advertised := false
+		for _, mechanism := range server.Auth {
+			if mechanism == "LOGIN" {
+				advertised = true
+				break
+			}
+		}
+		if !advertised {
+			return "", nil, errors.New("gomail: unencrypted connection")
+		}
+	}
+	if server.Name != a.host {
+		return "", nil, errors.New("gomail: wrong host name")
+	}
+	return "LOGIN", nil, nil
+}
+
+func (a *loginAuth) Next(fromServer []byte, more bool) ([]byte, error) {
+	if !more {
+		return nil, nil
+	}
+
+	switch {
+	case bytes.Equal(fromServer, []byte("Username:")):
+		return []byte(a.username), nil
+	case bytes.Equal(fromServer, []byte("Password:")):
+		return []byte(a.password), nil
+	default:
+		return nil, fmt.Errorf("gomail: unexpected server challenge: %s", fromServer)
+	}
+}
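
loginAuth is only used internally: per the CHANGELOG note above, the dialer implements the LOGIN mechanism when needed. A hedged end-to-end sketch, assuming the Dialer API (NewDialer, DialAndSend) from the rest of the vendored package; host and credentials are placeholders:

    package main

    import (
        "log"

        gomail "gopkg.in/gomail.v2"
    )

    func main() {
        m := gomail.NewMessage()
        m.SetHeader("From", "karmen@example.org")
        m.SetHeader("To", "teacher@example.org")
        m.SetHeader("Subject", "LDAP sync report")
        m.SetBody("text/plain", "Sync completed.")

        // The dialer negotiates the SMTP auth mechanism; LOGIN is backed by loginAuth above.
        d := gomail.NewDialer("smtp.example.org", 587, "user", "password")
        if err := d.DialAndSend(m); err != nil {
            log.Fatal(err)
        }
    }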

+ 5 - 0
vendor/gopkg.in/gomail.v2/doc.go

@@ -0,0 +1,5 @@
+// Package gomail provides a simple interface to compose emails and to mail them
+// efficiently.
+//
+// More info on Github: https://github.com/go-gomail/gomail
+package gomail

+ 322 - 0
vendor/gopkg.in/gomail.v2/message.go

@@ -0,0 +1,322 @@
+package gomail
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+// Message represents an email.
+type Message struct {
+	header      header
+	parts       []*part
+	attachments []*file
+	embedded    []*file
+	charset     string
+	encoding    Encoding
+	hEncoder    mimeEncoder
+	buf         bytes.Buffer
+}
+
+type header map[string][]string
+
+type part struct {
+	contentType string
+	copier      func(io.Writer) error
+	encoding    Encoding
+}
+
+// NewMessage creates a new message. It uses UTF-8 and quoted-printable encoding
+// by default.
+func NewMessage(settings ...MessageSetting) *Message {
+	m := &Message{
+		header:   make(header),
+		charset:  "UTF-8",
+		encoding: QuotedPrintable,
+	}
+
+	m.applySettings(settings)
+
+	if m.encoding == Base64 {
+		m.hEncoder = bEncoding
+	} else {
+		m.hEncoder = qEncoding
+	}
+
+	return m
+}
+
+// Reset resets the message so it can be reused. The message keeps its previous
+// settings so it is in the same state as after a call to NewMessage.
+func (m *Message) Reset() {
+	for k := range m.header {
+		delete(m.header, k)
+	}
+	m.parts = nil
+	m.attachments = nil
+	m.embedded = nil
+}
+
+func (m *Message) applySettings(settings []MessageSetting) {
+	for _, s := range settings {
+		s(m)
+	}
+}
+
+// A MessageSetting can be used as an argument in NewMessage to configure an
+// email.
+type MessageSetting func(m *Message)
+
+// SetCharset is a message setting to set the charset of the email.
+func SetCharset(charset string) MessageSetting {
+	return func(m *Message) {
+		m.charset = charset
+	}
+}
+
+// SetEncoding is a message setting to set the encoding of the email.
+func SetEncoding(enc Encoding) MessageSetting {
+	return func(m *Message) {
+		m.encoding = enc
+	}
+}
+
+// Encoding represents a MIME encoding scheme like quoted-printable or base64.
+type Encoding string
+
+const (
+	// QuotedPrintable represents the quoted-printable encoding as defined in
+	// RFC 2045.
+	QuotedPrintable Encoding = "quoted-printable"
+	// Base64 represents the base64 encoding as defined in RFC 2045.
+	Base64 Encoding = "base64"
+	// Unencoded can be used to avoid encoding the body of an email. The headers
+	// will still be encoded using quoted-printable encoding.
+	Unencoded Encoding = "8bit"
+)
+
+// SetHeader sets a value to the given header field.
+func (m *Message) SetHeader(field string, value ...string) {
+	m.encodeHeader(value)
+	m.header[field] = value
+}
+
+func (m *Message) encodeHeader(values []string) {
+	for i := range values {
+		values[i] = m.encodeString(values[i])
+	}
+}
+
+func (m *Message) encodeString(value string) string {
+	return m.hEncoder.Encode(m.charset, value)
+}
+
+// SetHeaders sets the message headers.
+func (m *Message) SetHeaders(h map[string][]string) {
+	for k, v := range h {
+		m.SetHeader(k, v...)
+	}
+}
+
+// SetAddressHeader sets an address to the given header field.
+func (m *Message) SetAddressHeader(field, address, name string) {
+	m.header[field] = []string{m.FormatAddress(address, name)}
+}
+
+// FormatAddress formats an address and a name as a valid RFC 5322 address.
+func (m *Message) FormatAddress(address, name string) string {
+	if name == "" {
+		return address
+	}
+
+	enc := m.encodeString(name)
+	if enc == name {
+		m.buf.WriteByte('"')
+		for i := 0; i < len(name); i++ {
+			b := name[i]
+			if b == '\\' || b == '"' {
+				m.buf.WriteByte('\\')
+			}
+			m.buf.WriteByte(b)
+		}
+		m.buf.WriteByte('"')
+	} else if hasSpecials(name) {
+		m.buf.WriteString(bEncoding.Encode(m.charset, name))
+	} else {
+		m.buf.WriteString(enc)
+	}
+	m.buf.WriteString(" <")
+	m.buf.WriteString(address)
+	m.buf.WriteByte('>')
+
+	addr := m.buf.String()
+	m.buf.Reset()
+	return addr
+}
+
+func hasSpecials(text string) bool {
+	for i := 0; i < len(text); i++ {
+		switch c := text[i]; c {
+		case '(', ')', '<', '>', '[', ']', ':', ';', '@', '\\', ',', '.', '"':
+			return true
+		}
+	}
+
+	return false
+}
+
+// SetDateHeader sets a date to the given header field.
+func (m *Message) SetDateHeader(field string, date time.Time) {
+	m.header[field] = []string{m.FormatDate(date)}
+}
+
+// FormatDate formats a date as a valid RFC 5322 date.
+func (m *Message) FormatDate(date time.Time) string {
+	return date.Format(time.RFC1123Z)
+}
+
+// GetHeader gets a header field.
+func (m *Message) GetHeader(field string) []string {
+	return m.header[field]
+}
+
+// SetBody sets the body of the message. It replaces any content previously set
+// by SetBody, AddAlternative or AddAlternativeWriter.
+func (m *Message) SetBody(contentType, body string, settings ...PartSetting) {
+	m.parts = []*part{m.newPart(contentType, newCopier(body), settings)}
+}
+
+// AddAlternative adds an alternative part to the message.
+//
+// It is commonly used to send HTML emails that fall back to a plain text
+// version for backward compatibility. AddAlternative appends the new part to
+// the end of the message, so the plain text part should be added before the
+// HTML part. See http://en.wikipedia.org/wiki/MIME#Alternative
+func (m *Message) AddAlternative(contentType, body string, settings ...PartSetting) {
+	m.AddAlternativeWriter(contentType, newCopier(body), settings...)
+}
+
+func newCopier(s string) func(io.Writer) error {
+	return func(w io.Writer) error {
+		_, err := io.WriteString(w, s)
+		return err
+	}
+}
+
+// AddAlternativeWriter adds an alternative part to the message. It can be
+// useful with the text/template or html/template packages.
+func (m *Message) AddAlternativeWriter(contentType string, f func(io.Writer) error, settings ...PartSetting) {
+	m.parts = append(m.parts, m.newPart(contentType, f, settings))
+}
+
+func (m *Message) newPart(contentType string, f func(io.Writer) error, settings []PartSetting) *part {
+	p := &part{
+		contentType: contentType,
+		copier:      f,
+		encoding:    m.encoding,
+	}
+
+	for _, s := range settings {
+		s(p)
+	}
+
+	return p
+}
+
+// A PartSetting can be used as an argument in Message.SetBody,
+// Message.AddAlternative or Message.AddAlternativeWriter to configure the part
+// added to a message.
+type PartSetting func(*part)
+
+// SetPartEncoding sets the encoding of the part added to the message. By
+// default, parts use the same encoding as the message.
+func SetPartEncoding(e Encoding) PartSetting {
+	return PartSetting(func(p *part) {
+		p.encoding = e
+	})
+}
+
+type file struct {
+	Name     string
+	Header   map[string][]string
+	CopyFunc func(w io.Writer) error
+}
+
+func (f *file) setHeader(field, value string) {
+	f.Header[field] = []string{value}
+}
+
+// A FileSetting can be used as an argument in Message.Attach or Message.Embed.
+type FileSetting func(*file)
+
+// SetHeader is a file setting to set the MIME header of the message part that
+// contains the file content.
+//
+// Mandatory headers are automatically added if they are not set when sending
+// the email.
+func SetHeader(h map[string][]string) FileSetting {
+	return func(f *file) {
+		for k, v := range h {
+			f.Header[k] = v
+		}
+	}
+}
+
+// Rename is a file setting to set the name of the attachment if the name is
+// different from the filename on disk.
+func Rename(name string) FileSetting {
+	return func(f *file) {
+		f.Name = name
+	}
+}
+
+// SetCopyFunc is a file setting to replace the function that runs when the
+// message is sent. It should copy the content of the file to the io.Writer.
+//
+// The default copy function opens the file with the given filename, and copies
+// its content to the io.Writer.
+func SetCopyFunc(f func(io.Writer) error) FileSetting {
+	return func(fi *file) {
+		fi.CopyFunc = f
+	}
+}
+
+func (m *Message) appendFile(list []*file, name string, settings []FileSetting) []*file {
+	f := &file{
+		Name:   filepath.Base(name),
+		Header: make(map[string][]string),
+		CopyFunc: func(w io.Writer) error {
+			h, err := os.Open(name)
+			if err != nil {
+				return err
+			}
+			if _, err := io.Copy(w, h); err != nil {
+				h.Close()
+				return err
+			}
+			return h.Close()
+		},
+	}
+
+	for _, s := range settings {
+		s(f)
+	}
+
+	if list == nil {
+		return []*file{f}
+	}
+
+	return append(list, f)
+}
+
+// Attach attaches the file to the email.
+func (m *Message) Attach(filename string, settings ...FileSetting) {
+	m.attachments = m.appendFile(m.attachments, filename, settings)
+}
+
+// Embed embeds the image in the email.
+func (m *Message) Embed(filename string, settings ...FileSetting) {
+	m.embedded = m.appendFile(m.embedded, filename, settings)
+}
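
The API above builds a message through setters rather than struct literals. As a rough sketch of how these pieces fit together (addresses, names and the attachment path are placeholders, not values from this repository):

```go
package mailer

import "gopkg.in/gomail.v2"

// buildReport composes a hypothetical notification message using the helpers
// defined in message.go; UTF-8 and quoted-printable encoding are the defaults.
func buildReport() *gomail.Message {
	m := gomail.NewMessage()
	m.SetAddressHeader("From", "noreply@example.org", "LDAP sync") // placeholder sender
	m.SetHeader("To", "admin@example.org")                         // placeholder recipient
	m.SetHeader("Subject", "Sync report")
	m.SetBody("text/plain", "Sync completed.")
	// Plain text first, then the HTML alternative (see AddAlternative's doc).
	m.AddAlternative("text/html", "<p>Sync completed.</p>")
	m.Attach("/tmp/report.csv", gomail.Rename("report.csv")) // placeholder file
	return m
}
```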

+ 21 - 0
vendor/gopkg.in/gomail.v2/mime.go

@@ -0,0 +1,21 @@
+// +build go1.5
+
+package gomail
+
+import (
+	"mime"
+	"mime/quotedprintable"
+	"strings"
+)
+
+var newQPWriter = quotedprintable.NewWriter
+
+type mimeEncoder struct {
+	mime.WordEncoder
+}
+
+var (
+	bEncoding     = mimeEncoder{mime.BEncoding}
+	qEncoding     = mimeEncoder{mime.QEncoding}
+	lastIndexByte = strings.LastIndexByte
+)

+ 25 - 0
vendor/gopkg.in/gomail.v2/mime_go14.go

@@ -0,0 +1,25 @@
+// +build !go1.5
+
+package gomail
+
+import "gopkg.in/alexcesaro/quotedprintable.v3"
+
+var newQPWriter = quotedprintable.NewWriter
+
+type mimeEncoder struct {
+	quotedprintable.WordEncoder
+}
+
+var (
+	bEncoding     = mimeEncoder{quotedprintable.BEncoding}
+	qEncoding     = mimeEncoder{quotedprintable.QEncoding}
+	lastIndexByte = func(s string, c byte) int {
+		for i := len(s) - 1; i >= 0; i-- {
+
+			if s[i] == c {
+				return i
+			}
+		}
+		return -1
+	}
+)

+ 116 - 0
vendor/gopkg.in/gomail.v2/send.go

@@ -0,0 +1,116 @@
+package gomail
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/mail"
+)
+
+// Sender is the interface that wraps the Send method.
+//
+// Send sends an email to the given addresses.
+type Sender interface {
+	Send(from string, to []string, msg io.WriterTo) error
+}
+
+// SendCloser is the interface that groups the Send and Close methods.
+type SendCloser interface {
+	Sender
+	Close() error
+}
+
+// A SendFunc is a function that sends emails to the given addresses.
+//
+// The SendFunc type is an adapter to allow the use of ordinary functions as
+// email senders. If f is a function with the appropriate signature, SendFunc(f)
+// is a Sender object that calls f.
+type SendFunc func(from string, to []string, msg io.WriterTo) error
+
+// Send calls f(from, to, msg).
+func (f SendFunc) Send(from string, to []string, msg io.WriterTo) error {
+	return f(from, to, msg)
+}
+
+// Send sends emails using the given Sender.
+func Send(s Sender, msg ...*Message) error {
+	for i, m := range msg {
+		if err := send(s, m); err != nil {
+			return fmt.Errorf("gomail: could not send email %d: %v", i+1, err)
+		}
+	}
+
+	return nil
+}
+
+func send(s Sender, m *Message) error {
+	from, err := m.getFrom()
+	if err != nil {
+		return err
+	}
+
+	to, err := m.getRecipients()
+	if err != nil {
+		return err
+	}
+
+	if err := s.Send(from, to, m); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *Message) getFrom() (string, error) {
+	from := m.header["Sender"]
+	if len(from) == 0 {
+		from = m.header["From"]
+		if len(from) == 0 {
+			return "", errors.New(`gomail: invalid message, "From" field is absent`)
+		}
+	}
+
+	return parseAddress(from[0])
+}
+
+func (m *Message) getRecipients() ([]string, error) {
+	n := 0
+	for _, field := range []string{"To", "Cc", "Bcc"} {
+		if addresses, ok := m.header[field]; ok {
+			n += len(addresses)
+		}
+	}
+	list := make([]string, 0, n)
+
+	for _, field := range []string{"To", "Cc", "Bcc"} {
+		if addresses, ok := m.header[field]; ok {
+			for _, a := range addresses {
+				addr, err := parseAddress(a)
+				if err != nil {
+					return nil, err
+				}
+				list = addAddress(list, addr)
+			}
+		}
+	}
+
+	return list, nil
+}
+
+func addAddress(list []string, addr string) []string {
+	for _, a := range list {
+		if addr == a {
+			return list
+		}
+	}
+
+	return append(list, addr)
+}
+
+func parseAddress(field string) (string, error) {
+	addr, err := mail.ParseAddress(field)
+	if err != nil {
+		return "", fmt.Errorf("gomail: invalid address %q: %v", field, err)
+	}
+	return addr.Address, nil
+}
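
Since SendFunc adapts any function with the right signature into a Sender, it is a convenient way to capture outgoing mail instead of delivering it, for example in tests. A minimal sketch under that assumption (the addresses are placeholders):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"gopkg.in/gomail.v2"
)

func main() {
	var buf bytes.Buffer
	// A fake Sender that records the envelope and dumps the message into buf.
	fake := gomail.SendFunc(func(from string, to []string, msg io.WriterTo) error {
		fmt.Printf("from=%s to=%v\n", from, to)
		_, err := msg.WriteTo(&buf)
		return err
	})

	m := gomail.NewMessage()
	m.SetHeader("From", "sync@example.org") // placeholder addresses
	m.SetHeader("To", "admin@example.org")
	m.SetBody("text/plain", "hello")

	if err := gomail.Send(fake, m); err != nil {
		fmt.Println("send failed:", err)
	}
	fmt.Println(buf.Len(), "bytes captured")
}
```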

+ 202 - 0
vendor/gopkg.in/gomail.v2/smtp.go

@@ -0,0 +1,202 @@
+package gomail
+
+import (
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net"
+	"net/smtp"
+	"strings"
+	"time"
+)
+
+// A Dialer is a dialer to an SMTP server.
+type Dialer struct {
+	// Host represents the host of the SMTP server.
+	Host string
+	// Port represents the port of the SMTP server.
+	Port int
+	// Username is the username to use to authenticate to the SMTP server.
+	Username string
+	// Password is the password to use to authenticate to the SMTP server.
+	Password string
+	// Auth represents the authentication mechanism used to authenticate to the
+	// SMTP server.
+	Auth smtp.Auth
+	// SSL defines whether an SSL connection is used. It should be false in
+	// most cases since the authentication mechanism should use the STARTTLS
+	// extension instead.
+	SSL bool
+	// TLSConfig represents the TLS configuration used for the TLS (when the
+	// STARTTLS extension is used) or SSL connection.
+	TLSConfig *tls.Config
+	// LocalName is the hostname sent to the SMTP server with the HELO command.
+	// By default, "localhost" is sent.
+	LocalName string
+}
+
+// NewDialer returns a new SMTP Dialer. The given parameters are used to connect
+// to the SMTP server.
+func NewDialer(host string, port int, username, password string) *Dialer {
+	return &Dialer{
+		Host:     host,
+		Port:     port,
+		Username: username,
+		Password: password,
+		SSL:      port == 465,
+	}
+}
+
+// NewPlainDialer returns a new SMTP Dialer. The given parameters are used to
+// connect to the SMTP server.
+//
+// Deprecated: Use NewDialer instead.
+func NewPlainDialer(host string, port int, username, password string) *Dialer {
+	return NewDialer(host, port, username, password)
+}
+
+// Dial dials and authenticates to an SMTP server. The returned SendCloser
+// should be closed when done using it.
+func (d *Dialer) Dial() (SendCloser, error) {
+	conn, err := netDialTimeout("tcp", addr(d.Host, d.Port), 10*time.Second)
+	if err != nil {
+		return nil, err
+	}
+
+	if d.SSL {
+		conn = tlsClient(conn, d.tlsConfig())
+	}
+
+	c, err := smtpNewClient(conn, d.Host)
+	if err != nil {
+		return nil, err
+	}
+
+	if d.LocalName != "" {
+		if err := c.Hello(d.LocalName); err != nil {
+			return nil, err
+		}
+	}
+
+	if !d.SSL {
+		if ok, _ := c.Extension("STARTTLS"); ok {
+			if err := c.StartTLS(d.tlsConfig()); err != nil {
+				c.Close()
+				return nil, err
+			}
+		}
+	}
+
+	if d.Auth == nil && d.Username != "" {
+		if ok, auths := c.Extension("AUTH"); ok {
+			if strings.Contains(auths, "CRAM-MD5") {
+				d.Auth = smtp.CRAMMD5Auth(d.Username, d.Password)
+			} else if strings.Contains(auths, "LOGIN") &&
+				!strings.Contains(auths, "PLAIN") {
+				d.Auth = &loginAuth{
+					username: d.Username,
+					password: d.Password,
+					host:     d.Host,
+				}
+			} else {
+				d.Auth = smtp.PlainAuth("", d.Username, d.Password, d.Host)
+			}
+		}
+	}
+
+	if d.Auth != nil {
+		if err = c.Auth(d.Auth); err != nil {
+			c.Close()
+			return nil, err
+		}
+	}
+
+	return &smtpSender{c, d}, nil
+}
+
+func (d *Dialer) tlsConfig() *tls.Config {
+	if d.TLSConfig == nil {
+		return &tls.Config{ServerName: d.Host}
+	}
+	return d.TLSConfig
+}
+
+func addr(host string, port int) string {
+	return fmt.Sprintf("%s:%d", host, port)
+}
+
+// DialAndSend opens a connection to the SMTP server, sends the given emails and
+// closes the connection.
+func (d *Dialer) DialAndSend(m ...*Message) error {
+	s, err := d.Dial()
+	if err != nil {
+		return err
+	}
+	defer s.Close()
+
+	return Send(s, m...)
+}
+
+type smtpSender struct {
+	smtpClient
+	d *Dialer
+}
+
+func (c *smtpSender) Send(from string, to []string, msg io.WriterTo) error {
+	if err := c.Mail(from); err != nil {
+		if err == io.EOF {
+			// This is probably due to a timeout, so reconnect and try again.
+			sc, derr := c.d.Dial()
+			if derr == nil {
+				if s, ok := sc.(*smtpSender); ok {
+					*c = *s
+					return c.Send(from, to, msg)
+				}
+			}
+		}
+		return err
+	}
+
+	for _, addr := range to {
+		if err := c.Rcpt(addr); err != nil {
+			return err
+		}
+	}
+
+	w, err := c.Data()
+	if err != nil {
+		return err
+	}
+
+	if _, err = msg.WriteTo(w); err != nil {
+		w.Close()
+		return err
+	}
+
+	return w.Close()
+}
+
+func (c *smtpSender) Close() error {
+	return c.Quit()
+}
+
+// Stubbed out for tests.
+var (
+	netDialTimeout = net.DialTimeout
+	tlsClient      = tls.Client
+	smtpNewClient  = func(conn net.Conn, host string) (smtpClient, error) {
+		return smtp.NewClient(conn, host)
+	}
+)
+
+type smtpClient interface {
+	Hello(string) error
+	Extension(string) (bool, string)
+	StartTLS(*tls.Config) error
+	Auth(smtp.Auth) error
+	Mail(string) error
+	Rcpt(string) error
+	Data() (io.WriteCloser, error)
+	Quit() error
+	Close() error
+}
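
In most programs the Dialer is the entry point: Dial negotiates STARTTLS when the server advertises it and picks an authentication mechanism from the advertised AUTH extensions. A sketch of the usual flow (host, port and credentials are placeholders):

```go
package main

import (
	"log"

	"gopkg.in/gomail.v2"
)

func main() {
	m := gomail.NewMessage()
	m.SetHeader("From", "sync@example.org") // placeholder account
	m.SetHeader("To", "admin@example.org")
	m.SetHeader("Subject", "ldap_sync run")
	m.SetBody("text/plain", "done")

	// Port 587 leaves SSL=false, so STARTTLS is used when available;
	// port 465 would enable implicit SSL instead.
	d := gomail.NewDialer("smtp.example.org", 587, "sync@example.org", "secret")
	if err := d.DialAndSend(m); err != nil {
		log.Fatal(err)
	}
}
```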

+ 306 - 0
vendor/gopkg.in/gomail.v2/writeto.go

@@ -0,0 +1,306 @@
+package gomail
+
+import (
+	"encoding/base64"
+	"errors"
+	"io"
+	"mime"
+	"mime/multipart"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+// WriteTo implements io.WriterTo. It dumps the whole message into w.
+func (m *Message) WriteTo(w io.Writer) (int64, error) {
+	mw := &messageWriter{w: w}
+	mw.writeMessage(m)
+	return mw.n, mw.err
+}
+
+func (w *messageWriter) writeMessage(m *Message) {
+	if _, ok := m.header["Mime-Version"]; !ok {
+		w.writeString("Mime-Version: 1.0\r\n")
+	}
+	if _, ok := m.header["Date"]; !ok {
+		w.writeHeader("Date", m.FormatDate(now()))
+	}
+	w.writeHeaders(m.header)
+
+	if m.hasMixedPart() {
+		w.openMultipart("mixed")
+	}
+
+	if m.hasRelatedPart() {
+		w.openMultipart("related")
+	}
+
+	if m.hasAlternativePart() {
+		w.openMultipart("alternative")
+	}
+	for _, part := range m.parts {
+		w.writePart(part, m.charset)
+	}
+	if m.hasAlternativePart() {
+		w.closeMultipart()
+	}
+
+	w.addFiles(m.embedded, false)
+	if m.hasRelatedPart() {
+		w.closeMultipart()
+	}
+
+	w.addFiles(m.attachments, true)
+	if m.hasMixedPart() {
+		w.closeMultipart()
+	}
+}
+
+func (m *Message) hasMixedPart() bool {
+	return (len(m.parts) > 0 && len(m.attachments) > 0) || len(m.attachments) > 1
+}
+
+func (m *Message) hasRelatedPart() bool {
+	return (len(m.parts) > 0 && len(m.embedded) > 0) || len(m.embedded) > 1
+}
+
+func (m *Message) hasAlternativePart() bool {
+	return len(m.parts) > 1
+}
+
+type messageWriter struct {
+	w          io.Writer
+	n          int64
+	writers    [3]*multipart.Writer
+	partWriter io.Writer
+	depth      uint8
+	err        error
+}
+
+func (w *messageWriter) openMultipart(mimeType string) {
+	mw := multipart.NewWriter(w)
+	contentType := "multipart/" + mimeType + ";\r\n boundary=" + mw.Boundary()
+	w.writers[w.depth] = mw
+
+	if w.depth == 0 {
+		w.writeHeader("Content-Type", contentType)
+		w.writeString("\r\n")
+	} else {
+		w.createPart(map[string][]string{
+			"Content-Type": {contentType},
+		})
+	}
+	w.depth++
+}
+
+func (w *messageWriter) createPart(h map[string][]string) {
+	w.partWriter, w.err = w.writers[w.depth-1].CreatePart(h)
+}
+
+func (w *messageWriter) closeMultipart() {
+	if w.depth > 0 {
+		w.writers[w.depth-1].Close()
+		w.depth--
+	}
+}
+
+func (w *messageWriter) writePart(p *part, charset string) {
+	w.writeHeaders(map[string][]string{
+		"Content-Type":              {p.contentType + "; charset=" + charset},
+		"Content-Transfer-Encoding": {string(p.encoding)},
+	})
+	w.writeBody(p.copier, p.encoding)
+}
+
+func (w *messageWriter) addFiles(files []*file, isAttachment bool) {
+	for _, f := range files {
+		if _, ok := f.Header["Content-Type"]; !ok {
+			mediaType := mime.TypeByExtension(filepath.Ext(f.Name))
+			if mediaType == "" {
+				mediaType = "application/octet-stream"
+			}
+			f.setHeader("Content-Type", mediaType+`; name="`+f.Name+`"`)
+		}
+
+		if _, ok := f.Header["Content-Transfer-Encoding"]; !ok {
+			f.setHeader("Content-Transfer-Encoding", string(Base64))
+		}
+
+		if _, ok := f.Header["Content-Disposition"]; !ok {
+			var disp string
+			if isAttachment {
+				disp = "attachment"
+			} else {
+				disp = "inline"
+			}
+			f.setHeader("Content-Disposition", disp+`; filename="`+f.Name+`"`)
+		}
+
+		if !isAttachment {
+			if _, ok := f.Header["Content-ID"]; !ok {
+				f.setHeader("Content-ID", "<"+f.Name+">")
+			}
+		}
+		w.writeHeaders(f.Header)
+		w.writeBody(f.CopyFunc, Base64)
+	}
+}
+
+func (w *messageWriter) Write(p []byte) (int, error) {
+	if w.err != nil {
+		return 0, errors.New("gomail: cannot write as writer is in error")
+	}
+
+	var n int
+	n, w.err = w.w.Write(p)
+	w.n += int64(n)
+	return n, w.err
+}
+
+func (w *messageWriter) writeString(s string) {
+	n, _ := io.WriteString(w.w, s)
+	w.n += int64(n)
+}
+
+func (w *messageWriter) writeHeader(k string, v ...string) {
+	w.writeString(k)
+	if len(v) == 0 {
+		w.writeString(":\r\n")
+		return
+	}
+	w.writeString(": ")
+
+	// The maximum header line length is 78 characters in RFC 5322 and 76
+	// characters in RFC 2047, so for the sake of simplicity we use the
+	// 76-character limit.
+	charsLeft := 76 - len(k) - len(": ")
+
+	for i, s := range v {
+		// If the line is already too long, insert a newline right away.
+		if charsLeft < 1 {
+			if i == 0 {
+				w.writeString("\r\n ")
+			} else {
+				w.writeString(",\r\n ")
+			}
+			charsLeft = 75
+		} else if i != 0 {
+			w.writeString(", ")
+			charsLeft -= 2
+		}
+
+		// While the header content is too long, fold it by inserting a newline.
+		for len(s) > charsLeft {
+			s = w.writeLine(s, charsLeft)
+			charsLeft = 75
+		}
+		w.writeString(s)
+		if i := lastIndexByte(s, '\n'); i != -1 {
+			charsLeft = 75 - (len(s) - i - 1)
+		} else {
+			charsLeft -= len(s)
+		}
+	}
+	w.writeString("\r\n")
+}
+
+func (w *messageWriter) writeLine(s string, charsLeft int) string {
+	// If there is already a newline before the limit, write the line.
+	if i := strings.IndexByte(s, '\n'); i != -1 && i < charsLeft {
+		w.writeString(s[:i+1])
+		return s[i+1:]
+	}
+
+	for i := charsLeft - 1; i >= 0; i-- {
+		if s[i] == ' ' {
+			w.writeString(s[:i])
+			w.writeString("\r\n ")
+			return s[i+1:]
+		}
+	}
+
+	// We could not insert a newline cleanly, so look for a space or a newline
+	// even if it is after the limit.
+	for i := 75; i < len(s); i++ {
+		if s[i] == ' ' {
+			w.writeString(s[:i])
+			w.writeString("\r\n ")
+			return s[i+1:]
+		}
+		if s[i] == '\n' {
+			w.writeString(s[:i+1])
+			return s[i+1:]
+		}
+	}
+
+	// Too bad, no space or newline in the whole string. Just write everything.
+	w.writeString(s)
+	return ""
+}
+
+func (w *messageWriter) writeHeaders(h map[string][]string) {
+	if w.depth == 0 {
+		for k, v := range h {
+			if k != "Bcc" {
+				w.writeHeader(k, v...)
+			}
+		}
+	} else {
+		w.createPart(h)
+	}
+}
+
+func (w *messageWriter) writeBody(f func(io.Writer) error, enc Encoding) {
+	var subWriter io.Writer
+	if w.depth == 0 {
+		w.writeString("\r\n")
+		subWriter = w.w
+	} else {
+		subWriter = w.partWriter
+	}
+
+	if enc == Base64 {
+		wc := base64.NewEncoder(base64.StdEncoding, newBase64LineWriter(subWriter))
+		w.err = f(wc)
+		wc.Close()
+	} else if enc == Unencoded {
+		w.err = f(subWriter)
+	} else {
+		wc := newQPWriter(subWriter)
+		w.err = f(wc)
+		wc.Close()
+	}
+}
+
+// As required by RFC 2045, 6.7. (page 21) for quoted-printable, and
+// RFC 2045, 6.8. (page 25) for base64.
+const maxLineLen = 76
+
+// base64LineWriter limits text encoded in base64 to 76 characters per line
+type base64LineWriter struct {
+	w       io.Writer
+	lineLen int
+}
+
+func newBase64LineWriter(w io.Writer) *base64LineWriter {
+	return &base64LineWriter{w: w}
+}
+
+func (w *base64LineWriter) Write(p []byte) (int, error) {
+	n := 0
+	for len(p)+w.lineLen > maxLineLen {
+		w.w.Write(p[:maxLineLen-w.lineLen])
+		w.w.Write([]byte("\r\n"))
+		p = p[maxLineLen-w.lineLen:]
+		n += maxLineLen - w.lineLen
+		w.lineLen = 0
+	}
+
+	w.w.Write(p)
+	w.lineLen += len(p)
+
+	return n + len(p), nil
+}
+
+// Stubbed out for testing.
+var now = time.Now
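
Because Message implements io.WriterTo, the rendered MIME output can be inspected without talking to an SMTP server at all, which is handy for checking header folding or encodings. A short sketch (addresses are placeholders):

```go
package main

import (
	"os"

	"gopkg.in/gomail.v2"
)

func main() {
	m := gomail.NewMessage()
	m.SetHeader("From", "a@example.org") // placeholder addresses
	m.SetHeader("To", "b@example.org")
	m.SetBody("text/plain", "body")

	// Prints the full message: Mime-Version, Date and the encoded body.
	if _, err := m.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
}
```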

+ 0 - 0
vendor/gopkg.in/ldap.v2/.gitignore


+ 29 - 0
vendor/gopkg.in/ldap.v2/.travis.yml

@@ -0,0 +1,29 @@
+language: go
+env:
+    global:
+        - VET_VERSIONS="1.6 1.7 tip"
+        - LINT_VERSIONS="1.6 1.7 tip"
+go:
+    - 1.2
+    - 1.3
+    - 1.4
+    - 1.5
+    - 1.6
+    - 1.7
+    - tip
+matrix:
+    fast_finish: true
+    allow_failures:
+        - go: tip
+go_import_path: gopkg.in/ldap.v2
+install:
+    - go get gopkg.in/asn1-ber.v1
+    - go get gopkg.in/ldap.v2
+    - go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover
+    - go get github.com/golang/lint/golint || true
+    - go build -v ./...
+script:
+    - make test
+    - make fmt
+    - if [[ "$VET_VERSIONS"  == *"$TRAVIS_GO_VERSION"* ]]; then make vet; fi
+    - if [[ "$LINT_VERSIONS" == *"$TRAVIS_GO_VERSION"* ]]; then make lint; fi

+ 22 - 0
vendor/gopkg.in/ldap.v2/LICENSE

@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
+Portions copyright (c) 2015-2016 go-ldap Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 42 - 0
vendor/gopkg.in/ldap.v2/Makefile

@@ -0,0 +1,42 @@
+.PHONY: default install build test quicktest fmt vet lint 
+
+default: fmt vet lint build quicktest
+
+install:
+	go get -t -v ./...
+
+build:
+	go build -v ./...
+
+test:
+	go test -v -cover ./...
+
+quicktest:
+	go test ./...
+
+# Capture output and force failure when there is non-empty output
+fmt:
+	@echo gofmt -l .
+	@OUTPUT=`gofmt -l . 2>&1`; \
+	if [ "$$OUTPUT" ]; then \
+		echo "gofmt must be run on the following files:"; \
+		echo "$$OUTPUT"; \
+		exit 1; \
+	fi
+
+# Only run on go1.5+
+vet:
+	go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult .
+
+# https://github.com/golang/lint
+# go get github.com/golang/lint/golint
+# Capture output and force failure when there is non-empty output
+# Only run on go1.5+
+lint:
+	@echo golint ./...
+	@OUTPUT=`golint ./... 2>&1`; \
+	if [ "$$OUTPUT" ]; then \
+		echo "golint errors:"; \
+		echo "$$OUTPUT"; \
+		exit 1; \
+	fi

+ 53 - 0
vendor/gopkg.in/ldap.v2/README.md

@@ -0,0 +1,53 @@
+[![GoDoc](https://godoc.org/gopkg.in/ldap.v2?status.svg)](https://godoc.org/gopkg.in/ldap.v2)
+[![Build Status](https://travis-ci.org/go-ldap/ldap.svg)](https://travis-ci.org/go-ldap/ldap)
+
+# Basic LDAP v3 functionality for the Go programming language.
+
+## Install
+
+For the latest version use:
+
+    go get gopkg.in/ldap.v2
+
+Import the latest version with:
+
+    import "gopkg.in/ldap.v2"
+
+## Required Libraries:
+
+ - gopkg.in/asn1-ber.v1
+
+## Features:
+
+ - Connecting to LDAP server (non-TLS, TLS, STARTTLS)
+ - Binding to LDAP server
+ - Searching for entries
+ - Filter Compile / Decompile
+ - Paging Search Results
+ - Modify Requests / Responses
+ - Add Requests / Responses
+ - Delete Requests / Responses
+
+## Examples:
+
+ - search
+ - modify
+
+## Contributing:
+
+Bug reports and pull requests are welcome!
+
+Before submitting a pull request, please make sure tests and verification scripts pass:
+```
+make all
+```
+
+To set up a pre-push hook to run the tests and verify scripts before pushing:
+```
+ln -s ../../.githooks/pre-push .git/hooks/pre-push
+```
+
+---
+The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
+The design is licensed under the Creative Commons 3.0 Attributions license.
+Read this article for more details: http://blog.golang.org/gopher

+ 113 - 0
vendor/gopkg.in/ldap.v2/add.go

@@ -0,0 +1,113 @@
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// AddRequest ::= [APPLICATION 8] SEQUENCE {
+//      entry           LDAPDN,
+//      attributes      AttributeList }
+//
+// AttributeList ::= SEQUENCE OF attribute Attribute
+
+package ldap
+
+import (
+	"errors"
+	"log"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+// Attribute represents an LDAP attribute
+type Attribute struct {
+	// Type is the name of the LDAP attribute
+	Type string
+	// Vals are the LDAP attribute values
+	Vals []string
+}
+
+func (a *Attribute) encode() *ber.Packet {
+	seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute")
+	seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type"))
+	set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
+	for _, value := range a.Vals {
+		set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
+	}
+	seq.AppendChild(set)
+	return seq
+}
+
+// AddRequest represents an LDAP AddRequest operation
+type AddRequest struct {
+	// DN identifies the entry being added
+	DN string
+	// Attributes list the attributes of the new entry
+	Attributes []Attribute
+}
+
+func (a AddRequest) encode() *ber.Packet {
+	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request")
+	request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.DN, "DN"))
+	attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
+	for _, attribute := range a.Attributes {
+		attributes.AppendChild(attribute.encode())
+	}
+	request.AppendChild(attributes)
+	return request
+}
+
+// Attribute adds an attribute with the given type and values
+func (a *AddRequest) Attribute(attrType string, attrVals []string) {
+	a.Attributes = append(a.Attributes, Attribute{Type: attrType, Vals: attrVals})
+}
+
+// NewAddRequest returns an AddRequest for the given DN, with no attributes
+func NewAddRequest(dn string) *AddRequest {
+	return &AddRequest{
+		DN: dn,
+	}
+
+}
+
+// Add performs the given AddRequest
+func (l *Conn) Add(addRequest *AddRequest) error {
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+	packet.AppendChild(addRequest.encode())
+
+	l.Debug.PrintPacket(packet)
+
+	msgCtx, err := l.sendMessage(packet)
+	if err != nil {
+		return err
+	}
+	defer l.finishMessage(msgCtx)
+
+	l.Debug.Printf("%d: waiting for response", msgCtx.id)
+	packetResponse, ok := <-msgCtx.responses
+	if !ok {
+		return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+	}
+	packet, err = packetResponse.ReadPacket()
+	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+	if err != nil {
+		return err
+	}
+
+	if l.Debug {
+		if err := addLDAPDescriptions(packet); err != nil {
+			return err
+		}
+		ber.PrintPacket(packet)
+	}
+
+	if packet.Children[1].Tag == ApplicationAddResponse {
+		resultCode, resultDescription := getLDAPResultCode(packet)
+		if resultCode != 0 {
+			return NewError(resultCode, errors.New(resultDescription))
+		}
+	} else {
+		log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
+	}
+
+	l.Debug.Printf("%d: returning", msgCtx.id)
+	return nil
+}
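
A rough sketch of driving the AddRequest API on an already bound connection; the DN, object classes and attribute values are placeholders, not taken from this repository:

```go
package directory

import "gopkg.in/ldap.v2"

// addUser creates a minimal person entry. The caller is assumed to have
// dialed and bound the connection with sufficient privileges.
func addUser(l *ldap.Conn) error {
	req := ldap.NewAddRequest("uid=jdoe,ou=people,dc=example,dc=org")
	req.Attribute("objectClass", []string{"inetOrgPerson", "organizationalPerson", "person", "top"})
	req.Attribute("cn", []string{"John Doe"})
	req.Attribute("sn", []string{"Doe"})
	return l.Add(req)
}
```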

+ 143 - 0
vendor/gopkg.in/ldap.v2/bind.go

@@ -0,0 +1,143 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+	"errors"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+// SimpleBindRequest represents a username/password bind operation
+type SimpleBindRequest struct {
+	// Username is the name of the Directory object that the client wishes to bind as
+	Username string
+	// Password is the credentials to bind with
+	Password string
+	// Controls are optional controls to send with the bind request
+	Controls []Control
+}
+
+// SimpleBindResult contains the response from the server
+type SimpleBindResult struct {
+	Controls []Control
+}
+
+// NewSimpleBindRequest returns a bind request
+func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest {
+	return &SimpleBindRequest{
+		Username: username,
+		Password: password,
+		Controls: controls,
+	}
+}
+
+func (bindRequest *SimpleBindRequest) encode() *ber.Packet {
+	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+	request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+	request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, bindRequest.Username, "User Name"))
+	request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, bindRequest.Password, "Password"))
+
+	request.AppendChild(encodeControls(bindRequest.Controls))
+
+	return request
+}
+
+// SimpleBind performs the simple bind operation defined in the given request
+func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) {
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+	encodedBindRequest := simpleBindRequest.encode()
+	packet.AppendChild(encodedBindRequest)
+
+	if l.Debug {
+		ber.PrintPacket(packet)
+	}
+
+	msgCtx, err := l.sendMessage(packet)
+	if err != nil {
+		return nil, err
+	}
+	defer l.finishMessage(msgCtx)
+
+	packetResponse, ok := <-msgCtx.responses
+	if !ok {
+		return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+	}
+	packet, err = packetResponse.ReadPacket()
+	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+	if err != nil {
+		return nil, err
+	}
+
+	if l.Debug {
+		if err := addLDAPDescriptions(packet); err != nil {
+			return nil, err
+		}
+		ber.PrintPacket(packet)
+	}
+
+	result := &SimpleBindResult{
+		Controls: make([]Control, 0),
+	}
+
+	if len(packet.Children) == 3 {
+		for _, child := range packet.Children[2].Children {
+			result.Controls = append(result.Controls, DecodeControl(child))
+		}
+	}
+
+	resultCode, resultDescription := getLDAPResultCode(packet)
+	if resultCode != 0 {
+		return result, NewError(resultCode, errors.New(resultDescription))
+	}
+
+	return result, nil
+}
+
+// Bind performs a bind with the given username and password
+func (l *Conn) Bind(username, password string) error {
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+	bindRequest := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+	bindRequest.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+	bindRequest.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, username, "User Name"))
+	bindRequest.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, password, "Password"))
+	packet.AppendChild(bindRequest)
+
+	if l.Debug {
+		ber.PrintPacket(packet)
+	}
+
+	msgCtx, err := l.sendMessage(packet)
+	if err != nil {
+		return err
+	}
+	defer l.finishMessage(msgCtx)
+
+	packetResponse, ok := <-msgCtx.responses
+	if !ok {
+		return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+	}
+	packet, err = packetResponse.ReadPacket()
+	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+	if err != nil {
+		return err
+	}
+
+	if l.Debug {
+		if err := addLDAPDescriptions(packet); err != nil {
+			return err
+		}
+		ber.PrintPacket(packet)
+	}
+
+	resultCode, resultDescription := getLDAPResultCode(packet)
+	if resultCode != 0 {
+		return NewError(resultCode, errors.New(resultDescription))
+	}
+
+	return nil
+}
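
Bind is normally the first operation after dialing. A minimal sketch of connecting and binding as a service account (host, DN and password are placeholders):

```go
package main

import (
	"log"

	"gopkg.in/ldap.v2"
)

func main() {
	// Plain LDAP on 389; see conn.go below for the TLS variants.
	l, err := ldap.Dial("tcp", "ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// Simple bind with a service account before issuing further requests.
	if err := l.Bind("cn=admin,dc=example,dc=org", "secret"); err != nil {
		log.Fatal(err)
	}
}
```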

+ 27 - 0
vendor/gopkg.in/ldap.v2/client.go

@@ -0,0 +1,27 @@
+package ldap
+
+import (
+	"crypto/tls"
+	"time"
+)
+
+// Client knows how to interact with an LDAP server
+type Client interface {
+	Start()
+	StartTLS(config *tls.Config) error
+	Close()
+	SetTimeout(time.Duration)
+
+	Bind(username, password string) error
+	SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error)
+
+	Add(addRequest *AddRequest) error
+	Del(delRequest *DelRequest) error
+	Modify(modifyRequest *ModifyRequest) error
+
+	Compare(dn, attribute, value string) (bool, error)
+	PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error)
+
+	Search(searchRequest *SearchRequest) (*SearchResult, error)
+	SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error)
+}
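
Because Client is an interface satisfied by *Conn, code that performs directory operations can depend on it instead of the concrete type, which makes it straightforward to substitute a stub in tests. A small sketch (syncUsers is a hypothetical caller, not part of this repository):

```go
package directory

import "gopkg.in/ldap.v2"

// syncUsers only needs the Add operation, so it accepts the Client interface
// rather than a concrete *ldap.Conn; tests can pass a fake implementation.
func syncUsers(c ldap.Client, reqs []*ldap.AddRequest) error {
	for _, req := range reqs {
		if err := c.Add(req); err != nil {
			return err
		}
	}
	return nil
}
```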

+ 85 - 0
vendor/gopkg.in/ldap.v2/compare.go

@@ -0,0 +1,85 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains Compare functionality
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// CompareRequest ::= [APPLICATION 14] SEQUENCE {
+//              entry           LDAPDN,
+//              ava             AttributeValueAssertion }
+//
+// AttributeValueAssertion ::= SEQUENCE {
+//              attributeDesc   AttributeDescription,
+//              assertionValue  AssertionValue }
+//
+// AttributeDescription ::= LDAPString
+//                         -- Constrained to <attributedescription>
+//                         -- [RFC4512]
+//
+// AttributeValue ::= OCTET STRING
+//
+
+package ldap
+
+import (
+	"errors"
+	"fmt"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+// Compare checks whether the attribute of the dn matches value. It returns
+// true if it does and false otherwise, along with any error that occurs.
+func (l *Conn) Compare(dn, attribute, value string) (bool, error) {
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+
+	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request")
+	request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, dn, "DN"))
+
+	ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion")
+	ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "AttributeDesc"))
+	ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagOctetString, value, "AssertionValue"))
+	request.AppendChild(ava)
+	packet.AppendChild(request)
+
+	l.Debug.PrintPacket(packet)
+
+	msgCtx, err := l.sendMessage(packet)
+	if err != nil {
+		return false, err
+	}
+	defer l.finishMessage(msgCtx)
+
+	l.Debug.Printf("%d: waiting for response", msgCtx.id)
+	packetResponse, ok := <-msgCtx.responses
+	if !ok {
+		return false, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+	}
+	packet, err = packetResponse.ReadPacket()
+	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+	if err != nil {
+		return false, err
+	}
+
+	if l.Debug {
+		if err := addLDAPDescriptions(packet); err != nil {
+			return false, err
+		}
+		ber.PrintPacket(packet)
+	}
+
+	if packet.Children[1].Tag == ApplicationCompareResponse {
+		resultCode, resultDescription := getLDAPResultCode(packet)
+		if resultCode == LDAPResultCompareTrue {
+			return true, nil
+		} else if resultCode == LDAPResultCompareFalse {
+			return false, nil
+		} else {
+			return false, NewError(resultCode, errors.New(resultDescription))
+		}
+	}
+	return false, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag)
+}
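
Compare maps the compareTrue/compareFalse result codes onto a plain bool. A one-line sketch on a bound connection (the DN and attribute are placeholders):

```go
package directory

import "gopkg.in/ldap.v2"

// mailMatches reports whether the entry's mail attribute equals addr.
func mailMatches(l *ldap.Conn, addr string) (bool, error) {
	return l.Compare("uid=jdoe,ou=people,dc=example,dc=org", "mail", addr)
}
```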

+ 467 - 0
vendor/gopkg.in/ldap.v2/conn.go

@@ -0,0 +1,467 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"log"
+	"net"
+	"sync"
+	"time"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+const (
+	// MessageQuit causes the processMessages loop to exit
+	MessageQuit = 0
+	// MessageRequest sends a request to the server
+	MessageRequest = 1
+	// MessageResponse receives a response from the server
+	MessageResponse = 2
+	// MessageFinish indicates the client considers a particular message ID to be finished
+	MessageFinish = 3
+	// MessageTimeout indicates the client-specified timeout for a particular message ID has been reached
+	MessageTimeout = 4
+)
+
+// PacketResponse contains the packet or error encountered reading a response
+type PacketResponse struct {
+	// Packet is the packet read from the server
+	Packet *ber.Packet
+	// Error is an error encountered while reading
+	Error error
+}
+
+// ReadPacket returns the packet or an error
+func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) {
+	if (pr == nil) || (pr.Packet == nil && pr.Error == nil) {
+		return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response"))
+	}
+	return pr.Packet, pr.Error
+}
+
+type messageContext struct {
+	id int64
+	// close(done) should only be called from finishMessage()
+	done chan struct{}
+	// close(responses) should only be called from processMessages(), and only sent to from sendResponse()
+	responses chan *PacketResponse
+}
+
+// sendResponse should only be called within the processMessages() loop which
+// is also responsible for closing the responses channel.
+func (msgCtx *messageContext) sendResponse(packet *PacketResponse) {
+	select {
+	case msgCtx.responses <- packet:
+		// Successfully sent packet to message handler.
+	case <-msgCtx.done:
+		// The request handler is done and will not receive more
+		// packets.
+	}
+}
+
+type messagePacket struct {
+	Op        int
+	MessageID int64
+	Packet    *ber.Packet
+	Context   *messageContext
+}
+
+type sendMessageFlags uint
+
+const (
+	startTLS sendMessageFlags = 1 << iota
+)
+
+// Conn represents an LDAP Connection
+type Conn struct {
+	conn                net.Conn
+	isTLS               bool
+	isClosing           bool
+	closeErr            error
+	isStartingTLS       bool
+	Debug               debugging
+	chanConfirm         chan bool
+	messageContexts     map[int64]*messageContext
+	chanMessage         chan *messagePacket
+	chanMessageID       chan int64
+	wgSender            sync.WaitGroup
+	wgClose             sync.WaitGroup
+	once                sync.Once
+	outstandingRequests uint
+	messageMutex        sync.Mutex
+	requestTimeout      time.Duration
+}
+
+var _ Client = &Conn{}
+
+// DefaultTimeout is a package-level variable that sets the timeout value
+// used for the Dial and DialTLS methods.
+//
+// WARNING: since this is a package-level variable, setting this value from
+// multiple places will probably result in undesired behaviour.
+var DefaultTimeout = 60 * time.Second
+
+// Dial connects to the given address on the given network using net.Dial
+// and then returns a new Conn for the connection.
+func Dial(network, addr string) (*Conn, error) {
+	c, err := net.DialTimeout(network, addr, DefaultTimeout)
+	if err != nil {
+		return nil, NewError(ErrorNetwork, err)
+	}
+	conn := NewConn(c, false)
+	conn.Start()
+	return conn, nil
+}
+
+// DialTLS connects to the given address on the given network using tls.Dial
+// and then returns a new Conn for the connection.
+func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
+	dc, err := net.DialTimeout(network, addr, DefaultTimeout)
+	if err != nil {
+		return nil, NewError(ErrorNetwork, err)
+	}
+	c := tls.Client(dc, config)
+	err = c.Handshake()
+	if err != nil {
+		// Handshake error, close the established connection before we return an error
+		dc.Close()
+		return nil, NewError(ErrorNetwork, err)
+	}
+	conn := NewConn(c, true)
+	conn.Start()
+	return conn, nil
+}
+
+// NewConn returns a new Conn using conn for network I/O.
+func NewConn(conn net.Conn, isTLS bool) *Conn {
+	return &Conn{
+		conn:            conn,
+		chanConfirm:     make(chan bool),
+		chanMessageID:   make(chan int64),
+		chanMessage:     make(chan *messagePacket, 10),
+		messageContexts: map[int64]*messageContext{},
+		requestTimeout:  0,
+		isTLS:           isTLS,
+	}
+}
+
+// Start initializes goroutines to read responses and process messages
+func (l *Conn) Start() {
+	go l.reader()
+	go l.processMessages()
+	l.wgClose.Add(1)
+}
+
+// Close closes the connection.
+func (l *Conn) Close() {
+	l.once.Do(func() {
+		l.isClosing = true
+		l.wgSender.Wait()
+
+		l.Debug.Printf("Sending quit message and waiting for confirmation")
+		l.chanMessage <- &messagePacket{Op: MessageQuit}
+		<-l.chanConfirm
+		close(l.chanMessage)
+
+		l.Debug.Printf("Closing network connection")
+		if err := l.conn.Close(); err != nil {
+			log.Print(err)
+		}
+
+		l.wgClose.Done()
+	})
+	l.wgClose.Wait()
+}
+
+// SetTimeout sets how long to wait after a request is sent before a MessageTimeout triggers
+func (l *Conn) SetTimeout(timeout time.Duration) {
+	if timeout > 0 {
+		l.requestTimeout = timeout
+	}
+}
+
+// Returns the next available messageID
+func (l *Conn) nextMessageID() int64 {
+	if l.chanMessageID != nil {
+		if messageID, ok := <-l.chanMessageID; ok {
+			return messageID
+		}
+	}
+	return 0
+}
+
+// StartTLS sends the command to start a TLS session and then creates a new TLS Client
+func (l *Conn) StartTLS(config *tls.Config) error {
+	if l.isTLS {
+		return NewError(ErrorNetwork, errors.New("ldap: already encrypted"))
+	}
+
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS")
+	request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command"))
+	packet.AppendChild(request)
+	l.Debug.PrintPacket(packet)
+
+	msgCtx, err := l.sendMessageWithFlags(packet, startTLS)
+	if err != nil {
+		return err
+	}
+	defer l.finishMessage(msgCtx)
+
+	l.Debug.Printf("%d: waiting for response", msgCtx.id)
+
+	packetResponse, ok := <-msgCtx.responses
+	if !ok {
+		return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+	}
+	packet, err = packetResponse.ReadPacket()
+	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+	if err != nil {
+		return err
+	}
+
+	if l.Debug {
+		if err := addLDAPDescriptions(packet); err != nil {
+			l.Close()
+			return err
+		}
+		ber.PrintPacket(packet)
+	}
+
+	if resultCode, message := getLDAPResultCode(packet); resultCode == LDAPResultSuccess {
+		conn := tls.Client(l.conn, config)
+
+		if err := conn.Handshake(); err != nil {
+			l.Close()
+			return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", err))
+		}
+
+		l.isTLS = true
+		l.conn = conn
+	} else {
+		return NewError(resultCode, fmt.Errorf("ldap: cannot StartTLS (%s)", message))
+	}
+	go l.reader()
+
+	return nil
+}
+
+func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
+	return l.sendMessageWithFlags(packet, 0)
+}
+
+func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
+	if l.isClosing {
+		return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
+	}
+	l.messageMutex.Lock()
+	l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
+	if l.isStartingTLS {
+		l.messageMutex.Unlock()
+		return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase"))
+	}
+	if flags&startTLS != 0 {
+		if l.outstandingRequests != 0 {
+			l.messageMutex.Unlock()
+			return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests"))
+		}
+		l.isStartingTLS = true
+	}
+	l.outstandingRequests++
+
+	l.messageMutex.Unlock()
+
+	responses := make(chan *PacketResponse)
+	messageID := packet.Children[0].Value.(int64)
+	message := &messagePacket{
+		Op:        MessageRequest,
+		MessageID: messageID,
+		Packet:    packet,
+		Context: &messageContext{
+			id:        messageID,
+			done:      make(chan struct{}),
+			responses: responses,
+		},
+	}
+	l.sendProcessMessage(message)
+	return message.Context, nil
+}
+
+func (l *Conn) finishMessage(msgCtx *messageContext) {
+	close(msgCtx.done)
+
+	if l.isClosing {
+		return
+	}
+
+	l.messageMutex.Lock()
+	l.outstandingRequests--
+	if l.isStartingTLS {
+		l.isStartingTLS = false
+	}
+	l.messageMutex.Unlock()
+
+	message := &messagePacket{
+		Op:        MessageFinish,
+		MessageID: msgCtx.id,
+	}
+	l.sendProcessMessage(message)
+}
+
+func (l *Conn) sendProcessMessage(message *messagePacket) bool {
+	if l.isClosing {
+		return false
+	}
+	l.wgSender.Add(1)
+	l.chanMessage <- message
+	l.wgSender.Done()
+	return true
+}
+
+func (l *Conn) processMessages() {
+	defer func() {
+		if err := recover(); err != nil {
+			log.Printf("ldap: recovered panic in processMessages: %v", err)
+		}
+		for messageID, msgCtx := range l.messageContexts {
+			// If we are closing due to an error, inform anyone who
+			// is waiting about the error.
+			if l.isClosing && l.closeErr != nil {
+				msgCtx.sendResponse(&PacketResponse{Error: l.closeErr})
+			}
+			l.Debug.Printf("Closing channel for MessageID %d", messageID)
+			close(msgCtx.responses)
+			delete(l.messageContexts, messageID)
+		}
+		close(l.chanMessageID)
+		l.chanConfirm <- true
+		close(l.chanConfirm)
+	}()
+
+	var messageID int64 = 1
+	for {
+		select {
+		case l.chanMessageID <- messageID:
+			messageID++
+		case message, ok := <-l.chanMessage:
+			if !ok {
+				l.Debug.Printf("Shutting down - message channel is closed")
+				return
+			}
+			switch message.Op {
+			case MessageQuit:
+				l.Debug.Printf("Shutting down - quit message received")
+				return
+			case MessageRequest:
+				// Add to message list and write to network
+				l.Debug.Printf("Sending message %d", message.MessageID)
+
+				buf := message.Packet.Bytes()
+				_, err := l.conn.Write(buf)
+				if err != nil {
+					l.Debug.Printf("Error Sending Message: %s", err.Error())
+					message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)})
+					close(message.Context.responses)
+					break
+				}
+
+				// Only add to messageContexts if we were able to
+				// successfully write the message.
+				l.messageContexts[message.MessageID] = message.Context
+
+				// Add timeout if defined
+				if l.requestTimeout > 0 {
+					go func() {
+						defer func() {
+							if err := recover(); err != nil {
+								log.Printf("ldap: recovered panic in RequestTimeout: %v", err)
+							}
+						}()
+						time.Sleep(l.requestTimeout)
+						timeoutMessage := &messagePacket{
+							Op:        MessageTimeout,
+							MessageID: message.MessageID,
+						}
+						l.sendProcessMessage(timeoutMessage)
+					}()
+				}
+			case MessageResponse:
+				l.Debug.Printf("Receiving message %d", message.MessageID)
+				if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+					msgCtx.sendResponse(&PacketResponse{message.Packet, nil})
+				} else {
+					log.Printf("Received unexpected message %d, %v", message.MessageID, l.isClosing)
+					ber.PrintPacket(message.Packet)
+				}
+			case MessageTimeout:
+				// Handle the timeout by closing the channel
+				// All reads will return immediately
+				if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+					l.Debug.Printf("Receiving message timeout for %d", message.MessageID)
+					msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")})
+					delete(l.messageContexts, message.MessageID)
+					close(msgCtx.responses)
+				}
+			case MessageFinish:
+				l.Debug.Printf("Finished message %d", message.MessageID)
+				if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+					delete(l.messageContexts, message.MessageID)
+					close(msgCtx.responses)
+				}
+			}
+		}
+	}
+}
+
+func (l *Conn) reader() {
+	cleanstop := false
+	defer func() {
+		if err := recover(); err != nil {
+			log.Printf("ldap: recovered panic in reader: %v", err)
+		}
+		if !cleanstop {
+			l.Close()
+		}
+	}()
+
+	for {
+		if cleanstop {
+			l.Debug.Printf("reader clean stopping (without closing the connection)")
+			return
+		}
+		packet, err := ber.ReadPacket(l.conn)
+		if err != nil {
+			// A read error is expected here if we are closing the connection...
+			if !l.isClosing {
+				l.closeErr = fmt.Errorf("unable to read LDAP response packet: %s", err)
+				l.Debug.Printf("reader error: %s", err.Error())
+			}
+			return
+		}
+		addLDAPDescriptions(packet)
+		if len(packet.Children) == 0 {
+			l.Debug.Printf("Received bad ldap packet")
+			continue
+		}
+		l.messageMutex.Lock()
+		if l.isStartingTLS {
+			cleanstop = true
+		}
+		l.messageMutex.Unlock()
+		message := &messagePacket{
+			Op:        MessageResponse,
+			MessageID: packet.Children[0].Value.(int64),
+			Packet:    packet,
+		}
+		if !l.sendProcessMessage(message) {
+			return
+		}
+	}
+}
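
DefaultTimeout only bounds the initial dial; per-request deadlines come from SetTimeout. A sketch of an LDAPS connection with a request timeout (the host name is a placeholder and certificate verification is left to the system roots):

```go
package main

import (
	"crypto/tls"
	"log"
	"time"

	"gopkg.in/ldap.v2"
)

func main() {
	// Implicit TLS on 636; for plain connections, StartTLS can be called
	// on a Conn obtained from ldap.Dial instead.
	l, err := ldap.DialTLS("tcp", "ldap.example.org:636",
		&tls.Config{ServerName: "ldap.example.org"})
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// Requests still pending after 30s receive a MessageTimeout error.
	l.SetTimeout(30 * time.Second)
}
```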

+ 420 - 0
vendor/gopkg.in/ldap.v2/control.go

@@ -0,0 +1,420 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+	"fmt"
+	"strconv"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+const (
+	// ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt
+	ControlTypePaging = "1.2.840.113556.1.4.319"
+	// ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
+	ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1"
+	// ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+	ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4"
+	// ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+	ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5"
+	// ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296
+	ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2"
+)
+
+// ControlTypeMap maps controls to text descriptions
+var ControlTypeMap = map[string]string{
+	ControlTypePaging:               "Paging",
+	ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft",
+	ControlTypeManageDsaIT:          "Manage DSA IT",
+}
+
+// Control defines an interface controls provide to encode and describe themselves
+type Control interface {
+	// GetControlType returns the OID
+	GetControlType() string
+	// Encode returns the ber packet representation
+	Encode() *ber.Packet
+	// String returns a human-readable description
+	String() string
+}
+
+// ControlString implements the Control interface for simple controls
+type ControlString struct {
+	ControlType  string
+	Criticality  bool
+	ControlValue string
+}
+
+// GetControlType returns the OID
+func (c *ControlString) GetControlType() string {
+	return c.ControlType
+}
+
+// Encode returns the ber packet representation
+func (c *ControlString) Encode() *ber.Packet {
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")"))
+	if c.Criticality {
+		packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
+	}
+	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value"))
+	return packet
+}
+
+// String returns a human-readable description
+func (c *ControlString) String() string {
+	return fmt.Sprintf("Control Type: %s (%q)  Criticality: %t  Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue)
+}
+
+// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt
+type ControlPaging struct {
+	// PagingSize indicates the page size
+	PagingSize uint32
+	// Cookie is an opaque value returned by the server to track a paging cursor
+	Cookie []byte
+}
+
+// GetControlType returns the OID
+func (c *ControlPaging) GetControlType() string {
+	return ControlTypePaging
+}
+
+// Encode returns the ber packet representation
+func (c *ControlPaging) Encode() *ber.Packet {
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")"))
+
+	p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)")
+	seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value")
+	seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size"))
+	cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie")
+	cookie.Value = c.Cookie
+	cookie.Data.Write(c.Cookie)
+	seq.AppendChild(cookie)
+	p2.AppendChild(seq)
+
+	packet.AppendChild(p2)
+	return packet
+}
+
+// String returns a human-readable description
+func (c *ControlPaging) String() string {
+	return fmt.Sprintf(
+		"Control Type: %s (%q)  Criticality: %t  PagingSize: %d  Cookie: %q",
+		ControlTypeMap[ControlTypePaging],
+		ControlTypePaging,
+		false,
+		c.PagingSize,
+		c.Cookie)
+}
+
+// SetCookie stores the given cookie in the paging control
+func (c *ControlPaging) SetCookie(cookie []byte) {
+	c.Cookie = cookie
+}
+
+// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
+type ControlBeheraPasswordPolicy struct {
+	// Expire contains the number of seconds before a password will expire
+	Expire int64
+	// Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password
+	Grace int64
+	// Error indicates the error code
+	Error int8
+	// ErrorString is a human readable error
+	ErrorString string
+}
+
+// GetControlType returns the OID
+func (c *ControlBeheraPasswordPolicy) GetControlType() string {
+	return ControlTypeBeheraPasswordPolicy
+}
+
+// Encode returns the ber packet representation
+func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet {
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")"))
+
+	return packet
+}
+
+// String returns a human-readable description
+func (c *ControlBeheraPasswordPolicy) String() string {
+	return fmt.Sprintf(
+		"Control Type: %s (%q)  Criticality: %t  Expire: %d  Grace: %d  Error: %d, ErrorString: %s",
+		ControlTypeMap[ControlTypeBeheraPasswordPolicy],
+		ControlTypeBeheraPasswordPolicy,
+		false,
+		c.Expire,
+		c.Grace,
+		c.Error,
+		c.ErrorString)
+}
+
+// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+type ControlVChuPasswordMustChange struct {
+	// MustChange indicates if the password is required to be changed
+	MustChange bool
+}
+
+// GetControlType returns the OID
+func (c *ControlVChuPasswordMustChange) GetControlType() string {
+	return ControlTypeVChuPasswordMustChange
+}
+
+// Encode returns the ber packet representation
+func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet {
+	return nil
+}
+
+// String returns a human-readable description
+func (c *ControlVChuPasswordMustChange) String() string {
+	return fmt.Sprintf(
+		"Control Type: %s (%q)  Criticality: %t  MustChange: %v",
+		ControlTypeMap[ControlTypeVChuPasswordMustChange],
+		ControlTypeVChuPasswordMustChange,
+		false,
+		c.MustChange)
+}
+
+// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+type ControlVChuPasswordWarning struct {
+	// Expire indicates the time in seconds until the password expires
+	Expire int64
+}
+
+// GetControlType returns the OID
+func (c *ControlVChuPasswordWarning) GetControlType() string {
+	return ControlTypeVChuPasswordWarning
+}
+
+// Encode returns the ber packet representation
+func (c *ControlVChuPasswordWarning) Encode() *ber.Packet {
+	return nil
+}
+
+// String returns a human-readable description
+func (c *ControlVChuPasswordWarning) String() string {
+	return fmt.Sprintf(
+		"Control Type: %s (%q)  Criticality: %t  Expire: %d",
+		ControlTypeMap[ControlTypeVChuPasswordWarning],
+		ControlTypeVChuPasswordWarning,
+		false,
+		c.Expire)
+}
+
+// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296
+type ControlManageDsaIT struct {
+	// Criticality indicates if this control is required
+	Criticality bool
+}
+
+// GetControlType returns the OID
+func (c *ControlManageDsaIT) GetControlType() string {
+	return ControlTypeManageDsaIT
+}
+
+// Encode returns the ber packet representation
+func (c *ControlManageDsaIT) Encode() *ber.Packet {
+	//FIXME
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+	packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")"))
+	if c.Criticality {
+		packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
+	}
+	return packet
+}
+
+// String returns a human-readable description
+func (c *ControlManageDsaIT) String() string {
+	return fmt.Sprintf(
+		"Control Type: %s (%q)  Criticality: %t",
+		ControlTypeMap[ControlTypeManageDsaIT],
+		ControlTypeManageDsaIT,
+		c.Criticality)
+}
+
+// NewControlManageDsaIT returns a ControlManageDsaIT control
+func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT {
+	return &ControlManageDsaIT{Criticality: Criticality}
+}
+
+// FindControl returns the first control of the given type in the list, or nil
+func FindControl(controls []Control, controlType string) Control {
+	for _, c := range controls {
+		if c.GetControlType() == controlType {
+			return c
+		}
+	}
+	return nil
+}
+
+// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made
+func DecodeControl(packet *ber.Packet) Control {
+	var (
+		ControlType = ""
+		Criticality = false
+		value       *ber.Packet
+	)
+
+	switch len(packet.Children) {
+	case 0:
+		// at least one child is required for control type
+		return nil
+
+	case 1:
+		// just type, no criticality or value
+		ControlType = packet.Children[0].Value.(string)
+		packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+
+	case 2:
+		ControlType = packet.Children[0].Value.(string)
+		packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+
+		// Children[1] could be criticality or value (both are optional)
+		// duck-type on whether this is a boolean
+		if _, ok := packet.Children[1].Value.(bool); ok {
+			packet.Children[1].Description = "Criticality"
+			Criticality = packet.Children[1].Value.(bool)
+		} else {
+			packet.Children[1].Description = "Control Value"
+			value = packet.Children[1]
+		}
+
+	case 3:
+		ControlType = packet.Children[0].Value.(string)
+		packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+
+		packet.Children[1].Description = "Criticality"
+		Criticality = packet.Children[1].Value.(bool)
+
+		packet.Children[2].Description = "Control Value"
+		value = packet.Children[2]
+
+	default:
+		// more than 3 children is invalid
+		return nil
+	}
+
+	switch ControlType {
+	case ControlTypeManageDsaIT:
+		return NewControlManageDsaIT(Criticality)
+	case ControlTypePaging:
+		value.Description += " (Paging)"
+		c := new(ControlPaging)
+		if value.Value != nil {
+			valueChildren := ber.DecodePacket(value.Data.Bytes())
+			value.Data.Truncate(0)
+			value.Value = nil
+			value.AppendChild(valueChildren)
+		}
+		value = value.Children[0]
+		value.Description = "Search Control Value"
+		value.Children[0].Description = "Paging Size"
+		value.Children[1].Description = "Cookie"
+		c.PagingSize = uint32(value.Children[0].Value.(int64))
+		c.Cookie = value.Children[1].Data.Bytes()
+		value.Children[1].Value = c.Cookie
+		return c
+	case ControlTypeBeheraPasswordPolicy:
+		value.Description += " (Password Policy - Behera)"
+		c := NewControlBeheraPasswordPolicy()
+		if value.Value != nil {
+			valueChildren := ber.DecodePacket(value.Data.Bytes())
+			value.Data.Truncate(0)
+			value.Value = nil
+			value.AppendChild(valueChildren)
+		}
+
+		sequence := value.Children[0]
+
+		for _, child := range sequence.Children {
+			if child.Tag == 0 {
+				//Warning
+				warningPacket := child.Children[0]
+				packet := ber.DecodePacket(warningPacket.Data.Bytes())
+				val, ok := packet.Value.(int64)
+				if ok {
+					if warningPacket.Tag == 0 {
+						//timeBeforeExpiration
+						c.Expire = val
+						warningPacket.Value = c.Expire
+					} else if warningPacket.Tag == 1 {
+						//graceAuthNsRemaining
+						c.Grace = val
+						warningPacket.Value = c.Grace
+					}
+				}
+			} else if child.Tag == 1 {
+				// Error
+				packet := ber.DecodePacket(child.Data.Bytes())
+				val, ok := packet.Value.(int8)
+				if !ok {
+					// unable to decode the error code; fall back to -1
+					val = -1
+				}
+				c.Error = val
+				child.Value = c.Error
+				c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error]
+			}
+		}
+		return c
+	case ControlTypeVChuPasswordMustChange:
+		c := &ControlVChuPasswordMustChange{MustChange: true}
+		return c
+	case ControlTypeVChuPasswordWarning:
+		c := &ControlVChuPasswordWarning{Expire: -1}
+		expireStr := ber.DecodeString(value.Data.Bytes())
+
+		expire, err := strconv.ParseInt(expireStr, 10, 64)
+		if err != nil {
+			return nil
+		}
+		c.Expire = expire
+		value.Value = c.Expire
+
+		return c
+	default:
+		c := new(ControlString)
+		c.ControlType = ControlType
+		c.Criticality = Criticality
+		if value != nil {
+			c.ControlValue = value.Value.(string)
+		}
+		return c
+	}
+}
+
+// NewControlString returns a generic control
+func NewControlString(controlType string, criticality bool, controlValue string) *ControlString {
+	return &ControlString{
+		ControlType:  controlType,
+		Criticality:  criticality,
+		ControlValue: controlValue,
+	}
+}
+
+// NewControlPaging returns a paging control
+func NewControlPaging(pagingSize uint32) *ControlPaging {
+	return &ControlPaging{PagingSize: pagingSize}
+}
+
+// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy
+func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy {
+	return &ControlBeheraPasswordPolicy{
+		Expire: -1,
+		Grace:  -1,
+		Error:  -1,
+	}
+}
+
+func encodeControls(controls []Control) *ber.Packet {
+	packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls")
+	for _, control := range controls {
+		packet.AppendChild(control.Encode())
+	}
+	return packet
+}
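
For reference, a minimal sketch of how client code typically drives the paging control defined above: request a page, read the cookie the server echoes back, and feed it into the next request via SetCookie. The server address, credentials, base DN and filter are placeholders, and Dial, Bind, NewSearchRequest and Search come from other files of this vendored package.

package main

import (
	"fmt"
	"log"

	ldap "gopkg.in/ldap.v2"
)

func main() {
	// Placeholder server address and credentials.
	l, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	if err := l.Bind("cn=admin,dc=example,dc=com", "secret"); err != nil {
		log.Fatal(err)
	}

	// Ask the server for at most 50 entries per page.
	paging := ldap.NewControlPaging(50)
	for {
		req := ldap.NewSearchRequest(
			"dc=example,dc=com",
			ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
			"(objectClass=inetOrgPerson)",
			[]string{"cn"},
			[]ldap.Control{paging},
		)
		res, err := l.Search(req)
		if err != nil {
			log.Fatal(err)
		}
		for _, entry := range res.Entries {
			fmt.Println(entry.DN)
		}

		// The response carries a paging control whose cookie points at the
		// next page; an empty cookie means the result set is exhausted.
		ctrl, ok := ldap.FindControl(res.Controls, ldap.ControlTypePaging).(*ldap.ControlPaging)
		if !ok || len(ctrl.Cookie) == 0 {
			break
		}
		paging.SetCookie(ctrl.Cookie)
	}
}

Keeping the cookie opaque on the client side matches RFC 2696: the server owns the paging state and the client only replays the cookie it was given.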

+ 24 - 0
vendor/gopkg.in/ldap.v2/debug.go

@@ -0,0 +1,24 @@
+package ldap
+
+import (
+	"log"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+// debugging type
+//     - has a Printf method to write the debug output
+type debugging bool
+
+// write debug output
+func (debug debugging) Printf(format string, args ...interface{}) {
+	if debug {
+		log.Printf(format, args...)
+	}
+}
+
+func (debug debugging) PrintPacket(packet *ber.Packet) {
+	if debug {
+		ber.PrintPacket(packet)
+	}
+}

+ 84 - 0
vendor/gopkg.in/ldap.v2/del.go

@@ -0,0 +1,84 @@
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// DelRequest ::= [APPLICATION 10] LDAPDN
+
+package ldap
+
+import (
+	"errors"
+	"log"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+// DelRequest implements an LDAP deletion request
+type DelRequest struct {
+	// DN is the name of the directory entry to delete
+	DN string
+	// Controls hold optional controls to send with the request
+	Controls []Control
+}
+
+func (d DelRequest) encode() *ber.Packet {
+	request := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, d.DN, "Del Request")
+	request.Data.Write([]byte(d.DN))
+	return request
+}
+
+// NewDelRequest creates a delete request for the given DN and controls
+func NewDelRequest(DN string,
+	Controls []Control) *DelRequest {
+	return &DelRequest{
+		DN:       DN,
+		Controls: Controls,
+	}
+}
+
+// Del executes the given delete request
+func (l *Conn) Del(delRequest *DelRequest) error {
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+	packet.AppendChild(delRequest.encode())
+	if delRequest.Controls != nil {
+		packet.AppendChild(encodeControls(delRequest.Controls))
+	}
+
+	l.Debug.PrintPacket(packet)
+
+	msgCtx, err := l.sendMessage(packet)
+	if err != nil {
+		return err
+	}
+	defer l.finishMessage(msgCtx)
+
+	l.Debug.Printf("%d: waiting for response", msgCtx.id)
+	packetResponse, ok := <-msgCtx.responses
+	if !ok {
+		return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+	}
+	packet, err = packetResponse.ReadPacket()
+	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+	if err != nil {
+		return err
+	}
+
+	if l.Debug {
+		if err := addLDAPDescriptions(packet); err != nil {
+			return err
+		}
+		ber.PrintPacket(packet)
+	}
+
+	if packet.Children[1].Tag == ApplicationDelResponse {
+		resultCode, resultDescription := getLDAPResultCode(packet)
+		if resultCode != 0 {
+			return NewError(resultCode, errors.New(resultDescription))
+		}
+	} else {
+		log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
+	}
+
+	l.Debug.Printf("%d: returning", msgCtx.id)
+	return nil
+}
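
A short sketch of the Del call above in use; the address, bind credentials and target DN are placeholders, and Dial and Bind are defined elsewhere in this vendored package.

package main

import (
	"log"

	ldap "gopkg.in/ldap.v2"
)

func main() {
	// Placeholder connection details.
	l, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	if err := l.Bind("cn=admin,dc=example,dc=com", "secret"); err != nil {
		log.Fatal(err)
	}

	// No request controls are attached, so the second argument is nil.
	del := ldap.NewDelRequest("uid=jdoe,ou=people,dc=example,dc=com", nil)
	if err := l.Del(del); err != nil {
		log.Fatal(err)
	}
}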

+ 244 - 0
vendor/gopkg.in/ldap.v2/dn.go

@@ -0,0 +1,244 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains DN parsing functionality
+//
+// https://tools.ietf.org/html/rfc4514
+//
+//   distinguishedName = [ relativeDistinguishedName
+//         *( COMMA relativeDistinguishedName ) ]
+//     relativeDistinguishedName = attributeTypeAndValue
+//         *( PLUS attributeTypeAndValue )
+//     attributeTypeAndValue = attributeType EQUALS attributeValue
+//     attributeType = descr / numericoid
+//     attributeValue = string / hexstring
+//
+//     ; The following characters are to be escaped when they appear
+//     ; in the value to be encoded: ESC, one of <escaped>, leading
+//     ; SHARP or SPACE, trailing SPACE, and NULL.
+//     string =   [ ( leadchar / pair ) [ *( stringchar / pair )
+//        ( trailchar / pair ) ] ]
+//
+//     leadchar = LUTF1 / UTFMB
+//     LUTF1 = %x01-1F / %x21 / %x24-2A / %x2D-3A /
+//        %x3D / %x3F-5B / %x5D-7F
+//
+//     trailchar  = TUTF1 / UTFMB
+//     TUTF1 = %x01-1F / %x21 / %x23-2A / %x2D-3A /
+//        %x3D / %x3F-5B / %x5D-7F
+//
+//     stringchar = SUTF1 / UTFMB
+//     SUTF1 = %x01-21 / %x23-2A / %x2D-3A /
+//        %x3D / %x3F-5B / %x5D-7F
+//
+//     pair = ESC ( ESC / special / hexpair )
+//     special = escaped / SPACE / SHARP / EQUALS
+//     escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE
+//     hexstring = SHARP 1*hexpair
+//     hexpair = HEX HEX
+//
+//  where the productions <descr>, <numericoid>, <COMMA>, <DQUOTE>,
+//  <EQUALS>, <ESC>, <HEX>, <LANGLE>, <NULL>, <PLUS>, <RANGLE>, <SEMI>,
+//  <SPACE>, <SHARP>, and <UTFMB> are defined in [RFC4512].
+//
+
+package ldap
+
+import (
+	"bytes"
+	enchex "encoding/hex"
+	"errors"
+	"fmt"
+	"strings"
+
+	ber "gopkg.in/asn1-ber.v1"
+)
+
+// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514
+type AttributeTypeAndValue struct {
+	// Type is the attribute type
+	Type string
+	// Value is the attribute value
+	Value string
+}
+
+// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514
+type RelativeDN struct {
+	Attributes []*AttributeTypeAndValue
+}
+
+// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514
+type DN struct {
+	RDNs []*RelativeDN
+}
+
+// ParseDN returns a distinguishedName or an error
+func ParseDN(str string) (*DN, error) {
+	dn := new(DN)
+	dn.RDNs = make([]*RelativeDN, 0)
+	rdn := new(RelativeDN)
+	rdn.Attributes = make([]*AttributeTypeAndValue, 0)
+	buffer := bytes.Buffer{}
+	attribute := new(AttributeTypeAndValue)
+	escaping := false
+
+	unescapedTrailingSpaces := 0
+	stringFromBuffer := func() string {
+		s := buffer.String()
+		s = s[0 : len(s)-unescapedTrailingSpaces]
+		buffer.Reset()
+		unescapedTrailingSpaces = 0
+		return s
+	}
+
+	for i := 0; i < len(str); i++ {
+		char := str[i]
+		if escaping {
+			unescapedTrailingSpaces = 0
+			escaping = false
+			switch char {
+			case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\':
+				buffer.WriteByte(char)
+				continue
+			}
+			// Not a special character, assume hex encoded octet
+			if len(str) == i+1 {
+				return nil, errors.New("Got corrupted escaped character")
+			}
+
+			dst := []byte{0}
+			n, err := enchex.Decode(dst, []byte(str[i:i+2]))
+			if err != nil {
+				return nil, fmt.Errorf("Failed to decode escaped character: %s", err)
+			} else if n != 1 {
+				return nil, fmt.Errorf("Expected 1 byte when un-escaping, got %d", n)
+			}
+			buffer.WriteByte(dst[0])
+			i++
+		} else if char == '\\' {
+			unescapedTrailingSpaces = 0
+			escaping = true
+		} else if char == '=' {
+			attribute.Type = stringFromBuffer()
+			// Special case: If the first character in the value is # the
+			// following data is BER encoded so we can just fast forward
+			// and decode.
+			if len(str) > i+1 && str[i+1] == '#' {
+				i += 2
+				index := strings.IndexAny(str[i:], ",+")
+				data := str
+				if index > 0 {
+					data = str[i : i+index]
+				} else {
+					data = str[i:]
+				}
+				rawBER, err := enchex.DecodeString(data)
+				if err != nil {
+					return nil, fmt.Errorf("Failed to decode BER encoding: %s", err)
+				}
+				packet := ber.DecodePacket(rawBER)
+				buffer.WriteString(packet.Data.String())
+				i += len(data) - 1
+			}
+		} else if char == ',' || char == '+' {
+			// We're done with this RDN or value, push it
+			attribute.Value = stringFromBuffer()
+			rdn.Attributes = append(rdn.Attributes, attribute)
+			attribute = new(AttributeTypeAndValue)
+			if char == ',' {
+				dn.RDNs = append(dn.RDNs, rdn)
+				rdn = new(RelativeDN)
+				rdn.Attributes = make([]*AttributeTypeAndValue, 0)
+			}
+		} else if char == ' ' && buffer.Len() == 0 {
+			// ignore unescaped leading spaces
+			continue
+		} else {
+			if char == ' ' {
+				// Track unescaped spaces in case they are trailing and we need to remove them
+				unescapedTrailingSpaces++
+			} else {
+				// Reset if we see a non-space char
+				unescapedTrailingSpaces = 0
+			}
+			buffer.WriteByte(char)
+		}
+	}
+	if buffer.Len() > 0 {
+		if len(attribute.Type) == 0 {
+			return nil, errors.New("DN ended with incomplete type, value pair")
+		}
+		attribute.Value = stringFromBuffer()
+		rdn.Attributes = append(rdn.Attributes, attribute)
+		dn.RDNs = append(dn.RDNs, rdn)
+	}
+	return dn, nil
+}
+
+// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Returns true if they have the same number of relative distinguished names
+// and corresponding relative distinguished names (by position) are the same.
+func (d *DN) Equal(other *DN) bool {
+	if len(d.RDNs) != len(other.RDNs) {
+		return false
+	}
+	for i := range d.RDNs {
+		if !d.RDNs[i].Equal(other.RDNs[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
+// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com"
+// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com"
+// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com"
+func (d *DN) AncestorOf(other *DN) bool {
+	if len(d.RDNs) >= len(other.RDNs) {
+		return false
+	}
+	// Take the last `len(d.RDNs)` RDNs from the other DN to compare against
+	otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
+	for i := range d.RDNs {
+		if !d.RDNs[i].Equal(otherRDNs[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues
+// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type.
+// The order of attributes is not significant.
+// Case of attribute types is not significant.
+func (r *RelativeDN) Equal(other *RelativeDN) bool {
+	if len(r.Attributes) != len(other.Attributes) {
+		return false
+	}
+	return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes)
+}
+
+func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool {
+	for _, attr := range attrs {
+		found := false
+		for _, myattr := range r.Attributes {
+			if myattr.Equal(attr) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
+// Case of the attribute type is not significant
+func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool {
+	return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value
+}
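
The DN helpers above need no directory connection, so they can be exercised directly. A small self-contained example with made-up DNs:

package main

import (
	"fmt"
	"log"

	ldap "gopkg.in/ldap.v2"
)

func main() {
	parent, err := ldap.ParseDN("ou=widgets,o=acme.com")
	if err != nil {
		log.Fatal(err)
	}
	child, err := ldap.ParseDN("ou=sprockets,ou=widgets,o=acme.com")
	if err != nil {
		log.Fatal(err)
	}
	// Attribute types compare case-insensitively and unescaped leading spaces are ignored.
	same, err := ldap.ParseDN("OU=widgets, o=acme.com")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(parent.AncestorOf(child)) // true: child ends with parent's RDNs
	fmt.Println(child.AncestorOf(parent)) // false
	fmt.Println(parent.Equal(same))       // true
}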

+ 4 - 0
vendor/gopkg.in/ldap.v2/doc.go

@@ -0,0 +1,4 @@
+/*
+Package ldap provides basic LDAP v3 functionality.
+*/
+package ldap

+ 148 - 0
vendor/gopkg.in/ldap.v2/error.go

@@ -0,0 +1,148 @@
+package ldap
+
+import (
+	"fmt"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+// LDAP Result Codes
+const (
+	LDAPResultSuccess                      = 0
+	LDAPResultOperationsError              = 1
+	LDAPResultProtocolError                = 2
+	LDAPResultTimeLimitExceeded            = 3
+	LDAPResultSizeLimitExceeded            = 4
+	LDAPResultCompareFalse                 = 5
+	LDAPResultCompareTrue                  = 6
+	LDAPResultAuthMethodNotSupported       = 7
+	LDAPResultStrongAuthRequired           = 8
+	LDAPResultReferral                     = 10
+	LDAPResultAdminLimitExceeded           = 11
+	LDAPResultUnavailableCriticalExtension = 12
+	LDAPResultConfidentialityRequired      = 13
+	LDAPResultSaslBindInProgress           = 14
+	LDAPResultNoSuchAttribute              = 16
+	LDAPResultUndefinedAttributeType       = 17
+	LDAPResultInappropriateMatching        = 18
+	LDAPResultConstraintViolation          = 19
+	LDAPResultAttributeOrValueExists       = 20
+	LDAPResultInvalidAttributeSyntax       = 21
+	LDAPResultNoSuchObject                 = 32
+	LDAPResultAliasProblem                 = 33
+	LDAPResultInvalidDNSyntax              = 34
+	LDAPResultAliasDereferencingProblem    = 36
+	LDAPResultInappropriateAuthentication  = 48
+	LDAPResultInvalidCredentials           = 49
+	LDAPResultInsufficientAccessRights     = 50
+	LDAPResultBusy                         = 51
+	LDAPResultUnavailable                  = 52
+	LDAPResultUnwillingToPerform           = 53
+	LDAPResultLoopDetect                   = 54
+	LDAPResultNamingViolation              = 64
+	LDAPResultObjectClassViolation         = 65
+	LDAPResultNotAllowedOnNonLeaf          = 66
+	LDAPResultNotAllowedOnRDN              = 67
+	LDAPResultEntryAlreadyExists           = 68
+	LDAPResultObjectClassModsProhibited    = 69
+	LDAPResultAffectsMultipleDSAs          = 71
+	LDAPResultOther                        = 80
+
+	ErrorNetwork            = 200
+	ErrorFilterCompile      = 201
+	ErrorFilterDecompile    = 202
+	ErrorDebugging          = 203
+	ErrorUnexpectedMessage  = 204
+	ErrorUnexpectedResponse = 205
+)
+
+// LDAPResultCodeMap contains string descriptions for LDAP error codes
+var LDAPResultCodeMap = map[uint8]string{
+	LDAPResultSuccess:                      "Success",
+	LDAPResultOperationsError:              "Operations Error",
+	LDAPResultProtocolError:                "Protocol Error",
+	LDAPResultTimeLimitExceeded:            "Time Limit Exceeded",
+	LDAPResultSizeLimitExceeded:            "Size Limit Exceeded",
+	LDAPResultCompareFalse:                 "Compare False",
+	LDAPResultCompareTrue:                  "Compare True",
+	LDAPResultAuthMethodNotSupported:       "Auth Method Not Supported",
+	LDAPResultStrongAuthRequired:           "Strong Auth Required",
+	LDAPResultReferral:                     "Referral",
+	LDAPResultAdminLimitExceeded:           "Admin Limit Exceeded",
+	LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension",
+	LDAPResultConfidentialityRequired:      "Confidentiality Required",
+	LDAPResultSaslBindInProgress:           "Sasl Bind In Progress",
+	LDAPResultNoSuchAttribute:              "No Such Attribute",
+	LDAPResultUndefinedAttributeType:       "Undefined Attribute Type",
+	LDAPResultInappropriateMatching:        "Inappropriate Matching",
+	LDAPResultConstraintViolation:          "Constraint Violation",
+	LDAPResultAttributeOrValueExists:       "Attribute Or Value Exists",
+	LDAPResultInvalidAttributeSyntax:       "Invalid Attribute Syntax",
+	LDAPResultNoSuchObject:                 "No Such Object",
+	LDAPResultAliasProblem:                 "Alias Problem",
+	LDAPResultInvalidDNSyntax:              "Invalid DN Syntax",
+	LDAPResultAliasDereferencingProblem:    "Alias Dereferencing Problem",
+	LDAPResultInappropriateAuthentication:  "Inappropriate Authentication",
+	LDAPResultInvalidCredentials:           "Invalid Credentials",
+	LDAPResultInsufficientAccessRights:     "Insufficient Access Rights",
+	LDAPResultBusy:                         "Busy",
+	LDAPResultUnavailable:                  "Unavailable",
+	LDAPResultUnwillingToPerform:           "Unwilling To Perform",
+	LDAPResultLoopDetect:                   "Loop Detect",
+	LDAPResultNamingViolation:              "Naming Violation",
+	LDAPResultObjectClassViolation:         "Object Class Violation",
+	LDAPResultNotAllowedOnNonLeaf:          "Not Allowed On Non Leaf",
+	LDAPResultNotAllowedOnRDN:              "Not Allowed On RDN",
+	LDAPResultEntryAlreadyExists:           "Entry Already Exists",
+	LDAPResultObjectClassModsProhibited:    "Object Class Mods Prohibited",
+	LDAPResultAffectsMultipleDSAs:          "Affects Multiple DSAs",
+	LDAPResultOther:                        "Other",
+}
+
+func getLDAPResultCode(packet *ber.Packet) (code uint8, description string) {
+	if packet == nil {
+		return ErrorUnexpectedResponse, "Empty packet"
+	} else if len(packet.Children) >= 2 {
+		response := packet.Children[1]
+		if response == nil {
+			return ErrorUnexpectedResponse, "Empty response in packet"
+		}
+		if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
+			// Children[1].Children[2] is the diagnosticMessage which is guaranteed to exist as seen here: https://tools.ietf.org/html/rfc4511#section-4.1.9
+			return uint8(response.Children[0].Value.(int64)), response.Children[2].Value.(string)
+		}
+	}
+
+	return ErrorNetwork, "Invalid packet format"
+}
+
+// Error holds LDAP error information
+type Error struct {
+	// Err is the underlying error
+	Err error
+	// ResultCode is the LDAP error code
+	ResultCode uint8
+}
+
+func (e *Error) Error() string {
+	return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
+}
+
+// NewError creates an LDAP error with the given code and underlying error
+func NewError(resultCode uint8, err error) error {
+	return &Error{ResultCode: resultCode, Err: err}
+}
+
+// IsErrorWithCode returns true if the given error is an LDAP error with the given result code
+func IsErrorWithCode(err error, desiredResultCode uint8) bool {
+	if err == nil {
+		return false
+	}
+
+	serverError, ok := err.(*Error)
+	if !ok {
+		return false
+	}
+
+	return serverError.ResultCode == desiredResultCode
+}
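
Callers are expected to branch on the typed result code rather than on error strings; NewError and IsErrorWithCode make that straightforward. A self-contained illustration:

package main

import (
	"errors"
	"fmt"

	ldap "gopkg.in/ldap.v2"
)

func main() {
	// Build an error the same way the package does and inspect its result code.
	err := ldap.NewError(ldap.LDAPResultNoSuchObject, errors.New("entry does not exist"))

	fmt.Println(err)
	// LDAP Result Code 32 "No Such Object": entry does not exist

	fmt.Println(ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject)) // true
	fmt.Println(ldap.IsErrorWithCode(err, ldap.LDAPResultBusy))         // false
}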

+ 466 - 0
vendor/gopkg.in/ldap.v2/filter.go

@@ -0,0 +1,466 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+	"bytes"
+	hexpac "encoding/hex"
+	"errors"
+	"fmt"
+	"strings"
+	"unicode/utf8"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+// Filter choices
+const (
+	FilterAnd             = 0
+	FilterOr              = 1
+	FilterNot             = 2
+	FilterEqualityMatch   = 3
+	FilterSubstrings      = 4
+	FilterGreaterOrEqual  = 5
+	FilterLessOrEqual     = 6
+	FilterPresent         = 7
+	FilterApproxMatch     = 8
+	FilterExtensibleMatch = 9
+)
+
+// FilterMap contains human readable descriptions of Filter choices
+var FilterMap = map[uint64]string{
+	FilterAnd:             "And",
+	FilterOr:              "Or",
+	FilterNot:             "Not",
+	FilterEqualityMatch:   "Equality Match",
+	FilterSubstrings:      "Substrings",
+	FilterGreaterOrEqual:  "Greater Or Equal",
+	FilterLessOrEqual:     "Less Or Equal",
+	FilterPresent:         "Present",
+	FilterApproxMatch:     "Approx Match",
+	FilterExtensibleMatch: "Extensible Match",
+}
+
+// SubstringFilter options
+const (
+	FilterSubstringsInitial = 0
+	FilterSubstringsAny     = 1
+	FilterSubstringsFinal   = 2
+)
+
+// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices
+var FilterSubstringsMap = map[uint64]string{
+	FilterSubstringsInitial: "Substrings Initial",
+	FilterSubstringsAny:     "Substrings Any",
+	FilterSubstringsFinal:   "Substrings Final",
+}
+
+// MatchingRuleAssertion choices
+const (
+	MatchingRuleAssertionMatchingRule = 1
+	MatchingRuleAssertionType         = 2
+	MatchingRuleAssertionMatchValue   = 3
+	MatchingRuleAssertionDNAttributes = 4
+)
+
+// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices
+var MatchingRuleAssertionMap = map[uint64]string{
+	MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule",
+	MatchingRuleAssertionType:         "Matching Rule Assertion Type",
+	MatchingRuleAssertionMatchValue:   "Matching Rule Assertion Match Value",
+	MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes",
+}
+
+// CompileFilter converts a string representation of a filter into a BER-encoded packet
+func CompileFilter(filter string) (*ber.Packet, error) {
+	if len(filter) == 0 || filter[0] != '(' {
+		return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('"))
+	}
+	packet, pos, err := compileFilter(filter, 1)
+	if err != nil {
+		return nil, err
+	}
+	if pos != len(filter) {
+		return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:])))
+	}
+	return packet, nil
+}
+
+// DecompileFilter converts a packet representation of a filter into a string representation
+func DecompileFilter(packet *ber.Packet) (ret string, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter"))
+		}
+	}()
+	ret = "("
+	err = nil
+	childStr := ""
+
+	switch packet.Tag {
+	case FilterAnd:
+		ret += "&"
+		for _, child := range packet.Children {
+			childStr, err = DecompileFilter(child)
+			if err != nil {
+				return
+			}
+			ret += childStr
+		}
+	case FilterOr:
+		ret += "|"
+		for _, child := range packet.Children {
+			childStr, err = DecompileFilter(child)
+			if err != nil {
+				return
+			}
+			ret += childStr
+		}
+	case FilterNot:
+		ret += "!"
+		childStr, err = DecompileFilter(packet.Children[0])
+		if err != nil {
+			return
+		}
+		ret += childStr
+
+	case FilterSubstrings:
+		ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+		ret += "="
+		for i, child := range packet.Children[1].Children {
+			if i == 0 && child.Tag != FilterSubstringsInitial {
+				ret += "*"
+			}
+			ret += EscapeFilter(ber.DecodeString(child.Data.Bytes()))
+			if child.Tag != FilterSubstringsFinal {
+				ret += "*"
+			}
+		}
+	case FilterEqualityMatch:
+		ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+		ret += "="
+		ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+	case FilterGreaterOrEqual:
+		ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+		ret += ">="
+		ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+	case FilterLessOrEqual:
+		ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+		ret += "<="
+		ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+	case FilterPresent:
+		ret += ber.DecodeString(packet.Data.Bytes())
+		ret += "=*"
+	case FilterApproxMatch:
+		ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+		ret += "~="
+		ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+	case FilterExtensibleMatch:
+		attr := ""
+		dnAttributes := false
+		matchingRule := ""
+		value := ""
+
+		for _, child := range packet.Children {
+			switch child.Tag {
+			case MatchingRuleAssertionMatchingRule:
+				matchingRule = ber.DecodeString(child.Data.Bytes())
+			case MatchingRuleAssertionType:
+				attr = ber.DecodeString(child.Data.Bytes())
+			case MatchingRuleAssertionMatchValue:
+				value = ber.DecodeString(child.Data.Bytes())
+			case MatchingRuleAssertionDNAttributes:
+				dnAttributes = child.Value.(bool)
+			}
+		}
+
+		if len(attr) > 0 {
+			ret += attr
+		}
+		if dnAttributes {
+			ret += ":dn"
+		}
+		if len(matchingRule) > 0 {
+			ret += ":"
+			ret += matchingRule
+		}
+		ret += ":="
+		ret += EscapeFilter(value)
+	}
+
+	ret += ")"
+	return
+}
+
+func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) {
+	for pos < len(filter) && filter[pos] == '(' {
+		child, newPos, err := compileFilter(filter, pos+1)
+		if err != nil {
+			return pos, err
+		}
+		pos = newPos
+		parent.AppendChild(child)
+	}
+	if pos == len(filter) {
+		return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
+	}
+
+	return pos + 1, nil
+}
+
+func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
+	var (
+		packet *ber.Packet
+		err    error
+	)
+
+	defer func() {
+		if r := recover(); r != nil {
+			err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter"))
+		}
+	}()
+	newPos := pos
+
+	currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:])
+
+	switch currentRune {
+	case utf8.RuneError:
+		return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
+	case '(':
+		packet, newPos, err = compileFilter(filter, pos+currentWidth)
+		newPos++
+		return packet, newPos, err
+	case '&':
+		packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd])
+		newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
+		return packet, newPos, err
+	case '|':
+		packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr])
+		newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
+		return packet, newPos, err
+	case '!':
+		packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot])
+		var child *ber.Packet
+		child, newPos, err = compileFilter(filter, pos+currentWidth)
+		packet.AppendChild(child)
+		return packet, newPos, err
+	default:
+		const (
+			stateReadingAttr                   = 0
+			stateReadingExtensibleMatchingRule = 1
+			stateReadingCondition              = 2
+		)
+
+		state := stateReadingAttr
+
+		attribute := ""
+		extensibleDNAttributes := false
+		extensibleMatchingRule := ""
+		condition := ""
+
+		for newPos < len(filter) {
+			remainingFilter := filter[newPos:]
+			currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter)
+			if currentRune == ')' {
+				break
+			}
+			if currentRune == utf8.RuneError {
+				return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
+			}
+
+			switch state {
+			case stateReadingAttr:
+				switch {
+				// Extensible rule, with only DN-matching
+				case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="):
+					packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+					extensibleDNAttributes = true
+					state = stateReadingCondition
+					newPos += 5
+
+				// Extensible rule, with DN-matching and a matching OID
+				case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"):
+					packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+					extensibleDNAttributes = true
+					state = stateReadingExtensibleMatchingRule
+					newPos += 4
+
+				// Extensible rule, with attr only
+				case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
+					packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+					state = stateReadingCondition
+					newPos += 2
+
+				// Extensible rule, with no DN attribute matching
+				case currentRune == ':':
+					packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+					state = stateReadingExtensibleMatchingRule
+					newPos++
+
+				// Equality condition
+				case currentRune == '=':
+					packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch])
+					state = stateReadingCondition
+					newPos++
+
+				// Greater-than or equal
+				case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="):
+					packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual])
+					state = stateReadingCondition
+					newPos += 2
+
+				// Less-than or equal
+				case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="):
+					packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual])
+					state = stateReadingCondition
+					newPos += 2
+
+				// Approx
+				case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="):
+					packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch])
+					state = stateReadingCondition
+					newPos += 2
+
+				// Still reading the attribute name
+				default:
+					attribute += fmt.Sprintf("%c", currentRune)
+					newPos += currentWidth
+				}
+
+			case stateReadingExtensibleMatchingRule:
+				switch {
+
+				// Matching rule OID is done
+				case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
+					state = stateReadingCondition
+					newPos += 2
+
+				// Still reading the matching rule oid
+				default:
+					extensibleMatchingRule += fmt.Sprintf("%c", currentRune)
+					newPos += currentWidth
+				}
+
+			case stateReadingCondition:
+				// append to the condition
+				condition += fmt.Sprintf("%c", currentRune)
+				newPos += currentWidth
+			}
+		}
+
+		if newPos == len(filter) {
+			err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
+			return packet, newPos, err
+		}
+		if packet == nil {
+			err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter"))
+			return packet, newPos, err
+		}
+
+		switch {
+		case packet.Tag == FilterExtensibleMatch:
+			// MatchingRuleAssertion ::= SEQUENCE {
+			//         matchingRule    [1] MatchingRuleID OPTIONAL,
+			//         type            [2] AttributeDescription OPTIONAL,
+			//         matchValue      [3] AssertionValue,
+			//         dnAttributes    [4] BOOLEAN DEFAULT FALSE
+			// }
+
+			// Include the matching rule oid, if specified
+			if len(extensibleMatchingRule) > 0 {
+				packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule, MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule]))
+			}
+
+			// Include the attribute, if specified
+			if len(attribute) > 0 {
+				packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute, MatchingRuleAssertionMap[MatchingRuleAssertionType]))
+			}
+
+			// Add the value (only required child)
+			encodedString, encodeErr := escapedStringToEncodedBytes(condition)
+			if encodeErr != nil {
+				return packet, newPos, encodeErr
+			}
+			packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue]))
+
+			// Defaults to false, so only include in the sequence if true
+			if extensibleDNAttributes {
+				packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes]))
+			}
+
+		case packet.Tag == FilterEqualityMatch && condition == "*":
+			packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute, FilterMap[FilterPresent])
+		case packet.Tag == FilterEqualityMatch && strings.Contains(condition, "*"):
+			packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
+			packet.Tag = FilterSubstrings
+			packet.Description = FilterMap[uint64(packet.Tag)]
+			seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings")
+			parts := strings.Split(condition, "*")
+			for i, part := range parts {
+				if part == "" {
+					continue
+				}
+				var tag ber.Tag
+				switch i {
+				case 0:
+					tag = FilterSubstringsInitial
+				case len(parts) - 1:
+					tag = FilterSubstringsFinal
+				default:
+					tag = FilterSubstringsAny
+				}
+				encodedString, encodeErr := escapedStringToEncodedBytes(part)
+				if encodeErr != nil {
+					return packet, newPos, encodeErr
+				}
+				seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)]))
+			}
+			packet.AppendChild(seq)
+		default:
+			encodedString, encodeErr := escapedStringToEncodedBytes(condition)
+			if encodeErr != nil {
+				return packet, newPos, encodeErr
+			}
+			packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
+			packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition"))
+		}
+
+		newPos += currentWidth
+		return packet, newPos, err
+	}
+}
+
+// Convert from "ABC\xx\xx\xx" form to literal bytes for transport
+func escapedStringToEncodedBytes(escapedString string) (string, error) {
+	var buffer bytes.Buffer
+	i := 0
+	for i < len(escapedString) {
+		currentRune, currentWidth := utf8.DecodeRuneInString(escapedString[i:])
+		if currentRune == utf8.RuneError {
+			return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", i))
+		}
+
+		// Check for escaped hex characters and convert them to their literal value for transport.
+		if currentRune == '\\' {
+			// http://tools.ietf.org/search/rfc4515
+			// \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not
+			// being a member of UTF1SUBSET.
+			if i+2 > len(escapedString) {
+				return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter"))
+			}
+			escByte, decodeErr := hexpac.DecodeString(escapedString[i+1 : i+3])
+			if decodeErr != nil {
+				return "", NewError(ErrorFilterCompile, errors.New("ldap: invalid characters for escape in filter"))
+			}
+			buffer.WriteByte(escByte[0])
+			i += 2 // +1 from end of loop, so 3 total for \xx.
+		} else {
+			buffer.WriteRune(currentRune)
+		}
+
+		i += currentWidth
+	}
+	return buffer.String(), nil
+}
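
CompileFilter and DecompileFilter are inverses for well-formed filters. A small round-trip example with a made-up filter string:

package main

import (
	"fmt"
	"log"

	ldap "gopkg.in/ldap.v2"
)

func main() {
	filter := "(&(objectClass=inetOrgPerson)(uid=jdo*))"

	// Compile the textual filter into its BER packet form...
	packet, err := ldap.CompileFilter(filter)
	if err != nil {
		log.Fatal(err)
	}

	// ...and turn the packet back into a string.
	back, err := ldap.DecompileFilter(packet)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back) // (&(objectClass=inetOrgPerson)(uid=jdo*))
}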

+ 320 - 0
vendor/gopkg.in/ldap.v2/ldap.go

@@ -0,0 +1,320 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+	"errors"
+	"io/ioutil"
+	"os"
+
+	ber "gopkg.in/asn1-ber.v1"
+)
+
+// LDAP Application Codes
+const (
+	ApplicationBindRequest           = 0
+	ApplicationBindResponse          = 1
+	ApplicationUnbindRequest         = 2
+	ApplicationSearchRequest         = 3
+	ApplicationSearchResultEntry     = 4
+	ApplicationSearchResultDone      = 5
+	ApplicationModifyRequest         = 6
+	ApplicationModifyResponse        = 7
+	ApplicationAddRequest            = 8
+	ApplicationAddResponse           = 9
+	ApplicationDelRequest            = 10
+	ApplicationDelResponse           = 11
+	ApplicationModifyDNRequest       = 12
+	ApplicationModifyDNResponse      = 13
+	ApplicationCompareRequest        = 14
+	ApplicationCompareResponse       = 15
+	ApplicationAbandonRequest        = 16
+	ApplicationSearchResultReference = 19
+	ApplicationExtendedRequest       = 23
+	ApplicationExtendedResponse      = 24
+)
+
+// ApplicationMap contains human readable descriptions of LDAP Application Codes
+var ApplicationMap = map[uint8]string{
+	ApplicationBindRequest:           "Bind Request",
+	ApplicationBindResponse:          "Bind Response",
+	ApplicationUnbindRequest:         "Unbind Request",
+	ApplicationSearchRequest:         "Search Request",
+	ApplicationSearchResultEntry:     "Search Result Entry",
+	ApplicationSearchResultDone:      "Search Result Done",
+	ApplicationModifyRequest:         "Modify Request",
+	ApplicationModifyResponse:        "Modify Response",
+	ApplicationAddRequest:            "Add Request",
+	ApplicationAddResponse:           "Add Response",
+	ApplicationDelRequest:            "Del Request",
+	ApplicationDelResponse:           "Del Response",
+	ApplicationModifyDNRequest:       "Modify DN Request",
+	ApplicationModifyDNResponse:      "Modify DN Response",
+	ApplicationCompareRequest:        "Compare Request",
+	ApplicationCompareResponse:       "Compare Response",
+	ApplicationAbandonRequest:        "Abandon Request",
+	ApplicationSearchResultReference: "Search Result Reference",
+	ApplicationExtendedRequest:       "Extended Request",
+	ApplicationExtendedResponse:      "Extended Response",
+}
+
+// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10)
+const (
+	BeheraPasswordExpired             = 0
+	BeheraAccountLocked               = 1
+	BeheraChangeAfterReset            = 2
+	BeheraPasswordModNotAllowed       = 3
+	BeheraMustSupplyOldPassword       = 4
+	BeheraInsufficientPasswordQuality = 5
+	BeheraPasswordTooShort            = 6
+	BeheraPasswordTooYoung            = 7
+	BeheraPasswordInHistory           = 8
+)
+
+// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes
+var BeheraPasswordPolicyErrorMap = map[int8]string{
+	BeheraPasswordExpired:             "Password expired",
+	BeheraAccountLocked:               "Account locked",
+	BeheraChangeAfterReset:            "Password must be changed",
+	BeheraPasswordModNotAllowed:       "Policy prevents password modification",
+	BeheraMustSupplyOldPassword:       "Policy requires old password in order to change password",
+	BeheraInsufficientPasswordQuality: "Password fails quality checks",
+	BeheraPasswordTooShort:            "Password is too short for policy",
+	BeheraPasswordTooYoung:            "Password has been changed too recently",
+	BeheraPasswordInHistory:           "New password is in list of old passwords",
+}
+
+// Adds descriptions to an LDAP Response packet for debugging
+func addLDAPDescriptions(packet *ber.Packet) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			err = NewError(ErrorDebugging, errors.New("ldap: cannot process packet to add descriptions"))
+		}
+	}()
+	packet.Description = "LDAP Response"
+	packet.Children[0].Description = "Message ID"
+
+	application := uint8(packet.Children[1].Tag)
+	packet.Children[1].Description = ApplicationMap[application]
+
+	switch application {
+	case ApplicationBindRequest:
+		addRequestDescriptions(packet)
+	case ApplicationBindResponse:
+		addDefaultLDAPResponseDescriptions(packet)
+	case ApplicationUnbindRequest:
+		addRequestDescriptions(packet)
+	case ApplicationSearchRequest:
+		addRequestDescriptions(packet)
+	case ApplicationSearchResultEntry:
+		packet.Children[1].Children[0].Description = "Object Name"
+		packet.Children[1].Children[1].Description = "Attributes"
+		for _, child := range packet.Children[1].Children[1].Children {
+			child.Description = "Attribute"
+			child.Children[0].Description = "Attribute Name"
+			child.Children[1].Description = "Attribute Values"
+			for _, grandchild := range child.Children[1].Children {
+				grandchild.Description = "Attribute Value"
+			}
+		}
+		if len(packet.Children) == 3 {
+			addControlDescriptions(packet.Children[2])
+		}
+	case ApplicationSearchResultDone:
+		addDefaultLDAPResponseDescriptions(packet)
+	case ApplicationModifyRequest:
+		addRequestDescriptions(packet)
+	case ApplicationModifyResponse:
+	case ApplicationAddRequest:
+		addRequestDescriptions(packet)
+	case ApplicationAddResponse:
+	case ApplicationDelRequest:
+		addRequestDescriptions(packet)
+	case ApplicationDelResponse:
+	case ApplicationModifyDNRequest:
+		addRequestDescriptions(packet)
+	case ApplicationModifyDNResponse:
+	case ApplicationCompareRequest:
+		addRequestDescriptions(packet)
+	case ApplicationCompareResponse:
+	case ApplicationAbandonRequest:
+		addRequestDescriptions(packet)
+	case ApplicationSearchResultReference:
+	case ApplicationExtendedRequest:
+		addRequestDescriptions(packet)
+	case ApplicationExtendedResponse:
+	}
+
+	return nil
+}
+
+func addControlDescriptions(packet *ber.Packet) {
+	packet.Description = "Controls"
+	for _, child := range packet.Children {
+		var value *ber.Packet
+		controlType := ""
+		child.Description = "Control"
+		switch len(child.Children) {
+		case 0:
+			// at least one child is required for control type
+			continue
+
+		case 1:
+			// just type, no criticality or value
+			controlType = child.Children[0].Value.(string)
+			child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
+
+		case 2:
+			controlType = child.Children[0].Value.(string)
+			child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
+			// Children[1] could be criticality or value (both are optional)
+			// duck-type on whether this is a boolean
+			if _, ok := child.Children[1].Value.(bool); ok {
+				child.Children[1].Description = "Criticality"
+			} else {
+				child.Children[1].Description = "Control Value"
+				value = child.Children[1]
+			}
+
+		case 3:
+			// criticality and value present
+			controlType = child.Children[0].Value.(string)
+			child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
+			child.Children[1].Description = "Criticality"
+			child.Children[2].Description = "Control Value"
+			value = child.Children[2]
+
+		default:
+			// more than 3 children is invalid
+			continue
+		}
+		if value == nil {
+			continue
+		}
+		switch controlType {
+		case ControlTypePaging:
+			value.Description += " (Paging)"
+			if value.Value != nil {
+				valueChildren := ber.DecodePacket(value.Data.Bytes())
+				value.Data.Truncate(0)
+				value.Value = nil
+				valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes()
+				value.AppendChild(valueChildren)
+			}
+			value.Children[0].Description = "Real Search Control Value"
+			value.Children[0].Children[0].Description = "Paging Size"
+			value.Children[0].Children[1].Description = "Cookie"
+
+		case ControlTypeBeheraPasswordPolicy:
+			value.Description += " (Password Policy - Behera Draft)"
+			if value.Value != nil {
+				valueChildren := ber.DecodePacket(value.Data.Bytes())
+				value.Data.Truncate(0)
+				value.Value = nil
+				value.AppendChild(valueChildren)
+			}
+			sequence := value.Children[0]
+			for _, child := range sequence.Children {
+				if child.Tag == 0 {
+					//Warning
+					warningPacket := child.Children[0]
+					packet := ber.DecodePacket(warningPacket.Data.Bytes())
+					val, ok := packet.Value.(int64)
+					if ok {
+						if warningPacket.Tag == 0 {
+							//timeBeforeExpiration
+							value.Description += " (TimeBeforeExpiration)"
+							warningPacket.Value = val
+						} else if warningPacket.Tag == 1 {
+							//graceAuthNsRemaining
+							value.Description += " (GraceAuthNsRemaining)"
+							warningPacket.Value = val
+						}
+					}
+				} else if child.Tag == 1 {
+					// Error
+					packet := ber.DecodePacket(child.Data.Bytes())
+					val, ok := packet.Value.(int8)
+					if !ok {
+						val = -1
+					}
+					child.Description = "Error"
+					child.Value = val
+				}
+			}
+		}
+	}
+}
+
+func addRequestDescriptions(packet *ber.Packet) {
+	packet.Description = "LDAP Request"
+	packet.Children[0].Description = "Message ID"
+	packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)]
+	if len(packet.Children) == 3 {
+		addControlDescriptions(packet.Children[2])
+	}
+}
+
+func addDefaultLDAPResponseDescriptions(packet *ber.Packet) {
+	resultCode, _ := getLDAPResultCode(packet)
+	packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[resultCode] + ")"
+	packet.Children[1].Children[1].Description = "Matched DN"
+	packet.Children[1].Children[2].Description = "Error Message"
+	if len(packet.Children[1].Children) > 3 {
+		packet.Children[1].Children[3].Description = "Referral"
+	}
+	if len(packet.Children) == 3 {
+		addControlDescriptions(packet.Children[2])
+	}
+}
+
+// DebugBinaryFile reads and prints packets from the given filename
+func DebugBinaryFile(fileName string) error {
+	file, err := ioutil.ReadFile(fileName)
+	if err != nil {
+		return NewError(ErrorDebugging, err)
+	}
+	ber.PrintBytes(os.Stdout, file, "")
+	packet := ber.DecodePacket(file)
+	addLDAPDescriptions(packet)
+	ber.PrintPacket(packet)
+
+	return nil
+}
+
+var hex = "0123456789abcdef"
+
+func mustEscape(c byte) bool {
+	return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0
+}
+
+// EscapeFilter escapes from the provided LDAP filter string the special
+// characters in the set `()*\` and those out of the range 0 < c < 0x80,
+// as defined in RFC4515.
+func EscapeFilter(filter string) string {
+	escape := 0
+	for i := 0; i < len(filter); i++ {
+		if mustEscape(filter[i]) {
+			escape++
+		}
+	}
+	if escape == 0 {
+		return filter
+	}
+	buf := make([]byte, len(filter)+escape*2)
+	for i, j := 0, 0; i < len(filter); i++ {
+		c := filter[i]
+		if mustEscape(c) {
+			buf[j+0] = '\\'
+			buf[j+1] = hex[c>>4]
+			buf[j+2] = hex[c&0xf]
+			j += 3
+		} else {
+			buf[j] = c
+			j++
+		}
+	}
+	return string(buf)
+}
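
EscapeFilter is the function most application code calls directly, typically to neutralise user-supplied values before interpolating them into a filter string. A tiny example with a hypothetical input:

package main

import (
	"fmt"

	ldap "gopkg.in/ldap.v2"
)

func main() {
	// Never splice raw user input into a filter: EscapeFilter replaces the
	// special characters ( ) * \ and non-ASCII bytes with \xx hex escapes.
	userInput := `jdoe)(objectClass=*`
	filter := fmt.Sprintf("(uid=%s)", ldap.EscapeFilter(userInput))
	fmt.Println(filter) // (uid=jdoe\29\28objectClass=\2a)
}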

+ 170 - 0
vendor/gopkg.in/ldap.v2/modify.go

@@ -0,0 +1,170 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains Modify functionality
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// ModifyRequest ::= [APPLICATION 6] SEQUENCE {
+//      object          LDAPDN,
+//      changes         SEQUENCE OF change SEQUENCE {
+//           operation       ENUMERATED {
+//                add     (0),
+//                delete  (1),
+//                replace (2),
+//                ...  },
+//           modification    PartialAttribute } }
+//
+// PartialAttribute ::= SEQUENCE {
+//      type       AttributeDescription,
+//      vals       SET OF value AttributeValue }
+//
+// AttributeDescription ::= LDAPString
+//                         -- Constrained to <attributedescription>
+//                         -- [RFC4512]
+//
+// AttributeValue ::= OCTET STRING
+//
+
+package ldap
+
+import (
+	"errors"
+	"log"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+// Change operation choices
+const (
+	AddAttribute     = 0
+	DeleteAttribute  = 1
+	ReplaceAttribute = 2
+)
+
+// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
+type PartialAttribute struct {
+	// Type is the type of the partial attribute
+	Type string
+	// Vals are the values of the partial attribute
+	Vals []string
+}
+
+func (p *PartialAttribute) encode() *ber.Packet {
+	seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute")
+	seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type"))
+	set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
+	for _, value := range p.Vals {
+		set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
+	}
+	seq.AppendChild(set)
+	return seq
+}
+
+// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
+type ModifyRequest struct {
+	// DN is the distinguishedName of the directory entry to modify
+	DN string
+	// AddAttributes contain the attributes to add
+	AddAttributes []PartialAttribute
+	// DeleteAttributes contain the attributes to delete
+	DeleteAttributes []PartialAttribute
+	// ReplaceAttributes contain the attributes to replace
+	ReplaceAttributes []PartialAttribute
+}
+
+// Add inserts the given attribute to the list of attributes to add
+func (m *ModifyRequest) Add(attrType string, attrVals []string) {
+	m.AddAttributes = append(m.AddAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
+}
+
+// Delete inserts the given attribute to the list of attributes to delete
+func (m *ModifyRequest) Delete(attrType string, attrVals []string) {
+	m.DeleteAttributes = append(m.DeleteAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
+}
+
+// Replace inserts the given attribute to the list of attributes to replace
+func (m *ModifyRequest) Replace(attrType string, attrVals []string) {
+	m.ReplaceAttributes = append(m.ReplaceAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
+}
+
+func (m ModifyRequest) encode() *ber.Packet {
+	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request")
+	request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN"))
+	changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes")
+	for _, attribute := range m.AddAttributes {
+		change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
+		change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(AddAttribute), "Operation"))
+		change.AppendChild(attribute.encode())
+		changes.AppendChild(change)
+	}
+	for _, attribute := range m.DeleteAttributes {
+		change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
+		change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(DeleteAttribute), "Operation"))
+		change.AppendChild(attribute.encode())
+		changes.AppendChild(change)
+	}
+	for _, attribute := range m.ReplaceAttributes {
+		change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
+		change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(ReplaceAttribute), "Operation"))
+		change.AppendChild(attribute.encode())
+		changes.AppendChild(change)
+	}
+	request.AppendChild(changes)
+	return request
+}
+
+// NewModifyRequest creates a modify request for the given DN
+func NewModifyRequest(
+	dn string,
+) *ModifyRequest {
+	return &ModifyRequest{
+		DN: dn,
+	}
+}
+
+// Modify performs the ModifyRequest
+func (l *Conn) Modify(modifyRequest *ModifyRequest) error {
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+	packet.AppendChild(modifyRequest.encode())
+
+	l.Debug.PrintPacket(packet)
+
+	msgCtx, err := l.sendMessage(packet)
+	if err != nil {
+		return err
+	}
+	defer l.finishMessage(msgCtx)
+
+	l.Debug.Printf("%d: waiting for response", msgCtx.id)
+	packetResponse, ok := <-msgCtx.responses
+	if !ok {
+		return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+	}
+	packet, err = packetResponse.ReadPacket()
+	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+	if err != nil {
+		return err
+	}
+
+	if l.Debug {
+		if err := addLDAPDescriptions(packet); err != nil {
+			return err
+		}
+		ber.PrintPacket(packet)
+	}
+
+	if packet.Children[1].Tag == ApplicationModifyResponse {
+		resultCode, resultDescription := getLDAPResultCode(packet)
+		if resultCode != 0 {
+			return NewError(resultCode, errors.New(resultDescription))
+		}
+	} else {
+		log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
+	}
+
+	l.Debug.Printf("%d: returning", msgCtx.id)
+	return nil
+}
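The Modify API above is driven by building a ModifyRequest and handing it to Conn.Modify. A minimal sketch of that flow follows; the host, bind DN, entry DN, and attribute values are hypothetical and only illustrate the call sequence.

```go
package main

import (
	"log"

	"gopkg.in/ldap.v2"
)

func main() {
	// Hypothetical server and credentials, for illustration only.
	conn, err := ldap.Dial("tcp", "ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Bind("cn=admin,dc=example,dc=org", "secret"); err != nil {
		log.Fatal(err)
	}

	// Replace the mail attribute of an (assumed) existing entry.
	req := ldap.NewModifyRequest("uid=jdoe,ou=people,dc=example,dc=org")
	req.Replace("mail", []string{"jdoe@example.org"})

	if err := conn.Modify(req); err != nil {
		log.Fatalf("modify failed: %v", err)
	}
}
```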

+ 148 - 0
vendor/gopkg.in/ldap.v2/passwdmodify.go

@@ -0,0 +1,148 @@
+// This file contains the password modify extended operation as specified in rfc 3062
+//
+// https://tools.ietf.org/html/rfc3062
+//
+
+package ldap
+
+import (
+	"errors"
+	"fmt"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+const (
+	passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1"
+)
+
+// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt
+type PasswordModifyRequest struct {
+	// UserIdentity is an optional string representation of the user associated with the request.
+	// This string may or may not be an LDAPDN [RFC2253].
+	// If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session
+	UserIdentity string
+	// OldPassword, if present, contains the user's current password
+	OldPassword string
+	// NewPassword, if present, contains the desired password for this user
+	NewPassword string
+}
+
+// PasswordModifyResult holds the server response to a PasswordModifyRequest
+type PasswordModifyResult struct {
+	// GeneratedPassword holds a password generated by the server, if present
+	GeneratedPassword string
+}
+
+func (r *PasswordModifyRequest) encode() (*ber.Packet, error) {
+	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation")
+	request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID"))
+	extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request")
+	passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request")
+	if r.UserIdentity != "" {
+		passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, r.UserIdentity, "User Identity"))
+	}
+	if r.OldPassword != "" {
+		passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, r.OldPassword, "Old Password"))
+	}
+	if r.NewPassword != "" {
+		passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, r.NewPassword, "New Password"))
+	}
+
+	extendedRequestValue.AppendChild(passwordModifyRequestValue)
+	request.AppendChild(extendedRequestValue)
+
+	return request, nil
+}
+
+// NewPasswordModifyRequest creates a new PasswordModifyRequest
+//
+// According to RFC 3062:
+// userIdentity is a string representing the user associated with the request.
+// This string may or may not be an LDAPDN (RFC 2253).
+// If userIdentity is empty then the operation will act on the user associated
+// with the session.
+//
+// oldPassword is the user's current password; it may be empty or required
+// depending on the session user's access rights (usually an administrator
+// can change a user's password without knowing the current one) and the
+// password policy (see the pwdSafeModify password policy attribute).
+//
+// newPassword is the desired new password for the user. If empty, the server
+// may either return an error or generate a new password, which will be
+// available in PasswordModifyResult.GeneratedPassword.
+//
+func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest {
+	return &PasswordModifyRequest{
+		UserIdentity: userIdentity,
+		OldPassword:  oldPassword,
+		NewPassword:  newPassword,
+	}
+}
+
+// PasswordModify performs the modification request
+func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) {
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+
+	encodedPasswordModifyRequest, err := passwordModifyRequest.encode()
+	if err != nil {
+		return nil, err
+	}
+	packet.AppendChild(encodedPasswordModifyRequest)
+
+	l.Debug.PrintPacket(packet)
+
+	msgCtx, err := l.sendMessage(packet)
+	if err != nil {
+		return nil, err
+	}
+	defer l.finishMessage(msgCtx)
+
+	result := &PasswordModifyResult{}
+
+	l.Debug.Printf("%d: waiting for response", msgCtx.id)
+	packetResponse, ok := <-msgCtx.responses
+	if !ok {
+		return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+	}
+	packet, err = packetResponse.ReadPacket()
+	l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+	if err != nil {
+		return nil, err
+	}
+
+	if packet == nil {
+		return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message"))
+	}
+
+	if l.Debug {
+		if err := addLDAPDescriptions(packet); err != nil {
+			return nil, err
+		}
+		ber.PrintPacket(packet)
+	}
+
+	if packet.Children[1].Tag == ApplicationExtendedResponse {
+		resultCode, resultDescription := getLDAPResultCode(packet)
+		if resultCode != 0 {
+			return nil, NewError(resultCode, errors.New(resultDescription))
+		}
+	} else {
+		return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag))
+	}
+
+	extendedResponse := packet.Children[1]
+	for _, child := range extendedResponse.Children {
+		if child.Tag == 11 {
+			passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes())
+			if len(passwordModifyResponseValue.Children) == 1 {
+				if passwordModifyResponseValue.Children[0].Tag == 0 {
+					result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes())
+				}
+			}
+		}
+	}
+
+	return result, nil
+}
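A sketch of how the extended operation above might be invoked, assuming a connection bound as the user whose password changes; the server address and credentials are made up. An empty new password asks the server to generate one, surfaced via PasswordModifyResult.GeneratedPassword.

```go
package main

import (
	"log"

	"gopkg.in/ldap.v2"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.org:389") // hypothetical host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Bind("uid=jdoe,ou=people,dc=example,dc=org", "oldSecret"); err != nil {
		log.Fatal(err)
	}

	// Empty user identity targets the bound user; empty new password lets
	// the server generate one (RFC 3062).
	req := ldap.NewPasswordModifyRequest("", "oldSecret", "")
	res, err := conn.PasswordModify(req)
	if err != nil {
		log.Fatalf("password modify failed: %v", err)
	}
	log.Printf("server generated password: %q", res.GeneratedPassword)
}
```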

+ 450 - 0
vendor/gopkg.in/ldap.v2/search.go

@@ -0,0 +1,450 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains Search functionality
+//
+// https://tools.ietf.org/html/rfc4511
+//
+//         SearchRequest ::= [APPLICATION 3] SEQUENCE {
+//              baseObject      LDAPDN,
+//              scope           ENUMERATED {
+//                   baseObject              (0),
+//                   singleLevel             (1),
+//                   wholeSubtree            (2),
+//                   ...  },
+//              derefAliases    ENUMERATED {
+//                   neverDerefAliases       (0),
+//                   derefInSearching        (1),
+//                   derefFindingBaseObj     (2),
+//                   derefAlways             (3) },
+//              sizeLimit       INTEGER (0 ..  maxInt),
+//              timeLimit       INTEGER (0 ..  maxInt),
+//              typesOnly       BOOLEAN,
+//              filter          Filter,
+//              attributes      AttributeSelection }
+//
+//         AttributeSelection ::= SEQUENCE OF selector LDAPString
+//                         -- The LDAPString is constrained to
+//                         -- <attributeSelector> in Section 4.5.1.8
+//
+//         Filter ::= CHOICE {
+//              and             [0] SET SIZE (1..MAX) OF filter Filter,
+//              or              [1] SET SIZE (1..MAX) OF filter Filter,
+//              not             [2] Filter,
+//              equalityMatch   [3] AttributeValueAssertion,
+//              substrings      [4] SubstringFilter,
+//              greaterOrEqual  [5] AttributeValueAssertion,
+//              lessOrEqual     [6] AttributeValueAssertion,
+//              present         [7] AttributeDescription,
+//              approxMatch     [8] AttributeValueAssertion,
+//              extensibleMatch [9] MatchingRuleAssertion,
+//              ...  }
+//
+//         SubstringFilter ::= SEQUENCE {
+//              type           AttributeDescription,
+//              substrings     SEQUENCE SIZE (1..MAX) OF substring CHOICE {
+//                   initial [0] AssertionValue,  -- can occur at most once
+//                   any     [1] AssertionValue,
+//                   final   [2] AssertionValue } -- can occur at most once
+//              }
+//
+//         MatchingRuleAssertion ::= SEQUENCE {
+//              matchingRule    [1] MatchingRuleId OPTIONAL,
+//              type            [2] AttributeDescription OPTIONAL,
+//              matchValue      [3] AssertionValue,
+//              dnAttributes    [4] BOOLEAN DEFAULT FALSE }
+//
+//
+
+package ldap
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+
+	"gopkg.in/asn1-ber.v1"
+)
+
+// scope choices
+const (
+	ScopeBaseObject   = 0
+	ScopeSingleLevel  = 1
+	ScopeWholeSubtree = 2
+)
+
+// ScopeMap contains human readable descriptions of scope choices
+var ScopeMap = map[int]string{
+	ScopeBaseObject:   "Base Object",
+	ScopeSingleLevel:  "Single Level",
+	ScopeWholeSubtree: "Whole Subtree",
+}
+
+// derefAliases
+const (
+	NeverDerefAliases   = 0
+	DerefInSearching    = 1
+	DerefFindingBaseObj = 2
+	DerefAlways         = 3
+)
+
+// DerefMap contains human readable descriptions of derefAliases choices
+var DerefMap = map[int]string{
+	NeverDerefAliases:   "NeverDerefAliases",
+	DerefInSearching:    "DerefInSearching",
+	DerefFindingBaseObj: "DerefFindingBaseObj",
+	DerefAlways:         "DerefAlways",
+}
+
+// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs.
+// The attribute map is iterated in alphabetical key order so that, for the same input
+// map of attributes, the returned entry always contains the attributes in the same order.
+func NewEntry(dn string, attributes map[string][]string) *Entry {
+	var attributeNames []string
+	for attributeName := range attributes {
+		attributeNames = append(attributeNames, attributeName)
+	}
+	sort.Strings(attributeNames)
+
+	var encodedAttributes []*EntryAttribute
+	for _, attributeName := range attributeNames {
+		encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName]))
+	}
+	return &Entry{
+		DN:         dn,
+		Attributes: encodedAttributes,
+	}
+}
+
+// Entry represents a single search result entry
+type Entry struct {
+	// DN is the distinguished name of the entry
+	DN string
+	// Attributes are the returned attributes for the entry
+	Attributes []*EntryAttribute
+}
+
+// GetAttributeValues returns the values for the named attribute, or an empty list
+func (e *Entry) GetAttributeValues(attribute string) []string {
+	for _, attr := range e.Attributes {
+		if attr.Name == attribute {
+			return attr.Values
+		}
+	}
+	return []string{}
+}
+
+// GetRawAttributeValues returns the byte values for the named attribute, or an empty list
+func (e *Entry) GetRawAttributeValues(attribute string) [][]byte {
+	for _, attr := range e.Attributes {
+		if attr.Name == attribute {
+			return attr.ByteValues
+		}
+	}
+	return [][]byte{}
+}
+
+// GetAttributeValue returns the first value for the named attribute, or ""
+func (e *Entry) GetAttributeValue(attribute string) string {
+	values := e.GetAttributeValues(attribute)
+	if len(values) == 0 {
+		return ""
+	}
+	return values[0]
+}
+
+// GetRawAttributeValue returns the first value for the named attribute, or an empty slice
+func (e *Entry) GetRawAttributeValue(attribute string) []byte {
+	values := e.GetRawAttributeValues(attribute)
+	if len(values) == 0 {
+		return []byte{}
+	}
+	return values[0]
+}
+
+// Print outputs a human-readable description
+func (e *Entry) Print() {
+	fmt.Printf("DN: %s\n", e.DN)
+	for _, attr := range e.Attributes {
+		attr.Print()
+	}
+}
+
+// PrettyPrint outputs a human-readable description with indenting
+func (e *Entry) PrettyPrint(indent int) {
+	fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN)
+	for _, attr := range e.Attributes {
+		attr.PrettyPrint(indent + 2)
+	}
+}
+
+// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair
+func NewEntryAttribute(name string, values []string) *EntryAttribute {
+	var bytes [][]byte
+	for _, value := range values {
+		bytes = append(bytes, []byte(value))
+	}
+	return &EntryAttribute{
+		Name:       name,
+		Values:     values,
+		ByteValues: bytes,
+	}
+}
+
+// EntryAttribute holds a single attribute
+type EntryAttribute struct {
+	// Name is the name of the attribute
+	Name string
+	// Values contain the string values of the attribute
+	Values []string
+	// ByteValues contain the raw values of the attribute
+	ByteValues [][]byte
+}
+
+// Print outputs a human-readable description
+func (e *EntryAttribute) Print() {
+	fmt.Printf("%s: %s\n", e.Name, e.Values)
+}
+
+// PrettyPrint outputs a human-readable description with indenting
+func (e *EntryAttribute) PrettyPrint(indent int) {
+	fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values)
+}
+
+// SearchResult holds the server's response to a search request
+type SearchResult struct {
+	// Entries are the returned entries
+	Entries []*Entry
+	// Referrals are the returned referrals
+	Referrals []string
+	// Controls are the returned controls
+	Controls []Control
+}
+
+// Print outputs a human-readable description
+func (s *SearchResult) Print() {
+	for _, entry := range s.Entries {
+		entry.Print()
+	}
+}
+
+// PrettyPrint outputs a human-readable description with indenting
+func (s *SearchResult) PrettyPrint(indent int) {
+	for _, entry := range s.Entries {
+		entry.PrettyPrint(indent)
+	}
+}
+
+// SearchRequest represents a search request to send to the server
+type SearchRequest struct {
+	BaseDN       string
+	Scope        int
+	DerefAliases int
+	SizeLimit    int
+	TimeLimit    int
+	TypesOnly    bool
+	Filter       string
+	Attributes   []string
+	Controls     []Control
+}
+
+func (s *SearchRequest) encode() (*ber.Packet, error) {
+	request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request")
+	request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, s.BaseDN, "Base DN"))
+	request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.Scope), "Scope"))
+	request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.DerefAliases), "Deref Aliases"))
+	request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.SizeLimit), "Size Limit"))
+	request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.TimeLimit), "Time Limit"))
+	request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, s.TypesOnly, "Types Only"))
+	// compile and encode filter
+	filterPacket, err := CompileFilter(s.Filter)
+	if err != nil {
+		return nil, err
+	}
+	request.AppendChild(filterPacket)
+	// encode attributes
+	attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
+	for _, attribute := range s.Attributes {
+		attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
+	}
+	request.AppendChild(attributesPacket)
+	return request, nil
+}
+
+// NewSearchRequest creates a new search request
+func NewSearchRequest(
+	BaseDN string,
+	Scope, DerefAliases, SizeLimit, TimeLimit int,
+	TypesOnly bool,
+	Filter string,
+	Attributes []string,
+	Controls []Control,
+) *SearchRequest {
+	return &SearchRequest{
+		BaseDN:       BaseDN,
+		Scope:        Scope,
+		DerefAliases: DerefAliases,
+		SizeLimit:    SizeLimit,
+		TimeLimit:    TimeLimit,
+		TypesOnly:    TypesOnly,
+		Filter:       Filter,
+		Attributes:   Attributes,
+		Controls:     Controls,
+	}
+}
+
+// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the
+// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically.
+// The following four cases are possible given the arguments:
+//  - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size
+//  - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries
+//  - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request
+//  - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries
+// A requested pagingSize of 0 is interpreted as no limit by LDAP servers.
+func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) {
+	var pagingControl *ControlPaging
+
+	control := FindControl(searchRequest.Controls, ControlTypePaging)
+	if control == nil {
+		pagingControl = NewControlPaging(pagingSize)
+		searchRequest.Controls = append(searchRequest.Controls, pagingControl)
+	} else {
+		castControl, ok := control.(*ControlPaging)
+		if !ok {
+			return nil, fmt.Errorf("Expected paging control to be of type *ControlPaging, got %v", control)
+		}
+		if castControl.PagingSize != pagingSize {
+			return nil, fmt.Errorf("Paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize)
+		}
+		pagingControl = castControl
+	}
+
+	searchResult := new(SearchResult)
+	for {
+		result, err := l.Search(searchRequest)
+		l.Debug.Printf("Looking for Paging Control...")
+		if err != nil {
+			return searchResult, err
+		}
+		if result == nil {
+			return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received"))
+		}
+
+		for _, entry := range result.Entries {
+			searchResult.Entries = append(searchResult.Entries, entry)
+		}
+		for _, referral := range result.Referrals {
+			searchResult.Referrals = append(searchResult.Referrals, referral)
+		}
+		for _, control := range result.Controls {
+			searchResult.Controls = append(searchResult.Controls, control)
+		}
+
+		l.Debug.Printf("Looking for Paging Control...")
+		pagingResult := FindControl(result.Controls, ControlTypePaging)
+		if pagingResult == nil {
+			pagingControl = nil
+			l.Debug.Printf("Could not find paging control.  Breaking...")
+			break
+		}
+
+		cookie := pagingResult.(*ControlPaging).Cookie
+		if len(cookie) == 0 {
+			pagingControl = nil
+			l.Debug.Printf("Could not find cookie.  Breaking...")
+			break
+		}
+		pagingControl.SetCookie(cookie)
+	}
+
+	if pagingControl != nil {
+		l.Debug.Printf("Abandoning Paging...")
+		pagingControl.PagingSize = 0
+		l.Search(searchRequest)
+	}
+
+	return searchResult, nil
+}
+
+// Search performs the given search request
+func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) {
+	packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+	packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+	// encode search request
+	encodedSearchRequest, err := searchRequest.encode()
+	if err != nil {
+		return nil, err
+	}
+	packet.AppendChild(encodedSearchRequest)
+	// encode search controls
+	if searchRequest.Controls != nil {
+		packet.AppendChild(encodeControls(searchRequest.Controls))
+	}
+
+	l.Debug.PrintPacket(packet)
+
+	msgCtx, err := l.sendMessage(packet)
+	if err != nil {
+		return nil, err
+	}
+	defer l.finishMessage(msgCtx)
+
+	result := &SearchResult{
+		Entries:   make([]*Entry, 0),
+		Referrals: make([]string, 0),
+		Controls:  make([]Control, 0)}
+
+	foundSearchResultDone := false
+	for !foundSearchResultDone {
+		l.Debug.Printf("%d: waiting for response", msgCtx.id)
+		packetResponse, ok := <-msgCtx.responses
+		if !ok {
+			return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+		}
+		packet, err = packetResponse.ReadPacket()
+		l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+		if err != nil {
+			return nil, err
+		}
+
+		if l.Debug {
+			if err := addLDAPDescriptions(packet); err != nil {
+				return nil, err
+			}
+			ber.PrintPacket(packet)
+		}
+
+		switch packet.Children[1].Tag {
+		case 4:
+			entry := new(Entry)
+			entry.DN = packet.Children[1].Children[0].Value.(string)
+			for _, child := range packet.Children[1].Children[1].Children {
+				attr := new(EntryAttribute)
+				attr.Name = child.Children[0].Value.(string)
+				for _, value := range child.Children[1].Children {
+					attr.Values = append(attr.Values, value.Value.(string))
+					attr.ByteValues = append(attr.ByteValues, value.ByteValue)
+				}
+				entry.Attributes = append(entry.Attributes, attr)
+			}
+			result.Entries = append(result.Entries, entry)
+		case 5:
+			resultCode, resultDescription := getLDAPResultCode(packet)
+			if resultCode != 0 {
+				return result, NewError(resultCode, errors.New(resultDescription))
+			}
+			if len(packet.Children) == 3 {
+				for _, child := range packet.Children[2].Children {
+					result.Controls = append(result.Controls, DecodeControl(child))
+				}
+			}
+			foundSearchResultDone = true
+		case 19:
+			result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string))
+		}
+	}
+	l.Debug.Printf("%d: returning", msgCtx.id)
+	return result, nil
+}
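A sketch of the search API introduced above, including the paged variant; the base DN, filter, and attribute names are assumed values, and the paging size of 100 is arbitrary.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ldap.v2"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.org:389") // hypothetical host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// An anonymous bind is assumed to be allowed for this subtree.
	req := ldap.NewSearchRequest(
		"ou=people,dc=example,dc=org", // base DN (assumed)
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		"(objectClass=inetOrgPerson)", // filter
		[]string{"uid", "mail"},       // attributes
		nil,                           // controls
	)

	// SearchWithPaging transparently drives the RFC 2696 paging control.
	res, err := conn.SearchWithPaging(req, 100)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range res.Entries {
		fmt.Printf("%s -> %s\n", entry.GetAttributeValue("uid"), entry.GetAttributeValue("mail"))
	}
}
```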

+ 3 - 0
vendor/gopkg.in/yaml.v2/.travis.yml

@@ -4,6 +4,9 @@ go:
     - 1.4
     - 1.5
     - 1.6
+    - 1.7
+    - 1.8
+    - 1.9
     - tip
 
 go_import_path: gopkg.in/yaml.v2

+ 13 - 0
vendor/gopkg.in/yaml.v2/NOTICE

@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 2 - 2
vendor/gopkg.in/yaml.v2/README.md

@@ -48,8 +48,6 @@ The yaml package is licensed under the Apache License 2.0. Please see the LICENS
 Example
 -------
 
-Some more examples can be found in the "examples" folder.
-
 ```Go
 package main
 
@@ -67,6 +65,8 @@ b:
   d: [3, 4]
 `
 
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
 type T struct {
         A string
         B struct {

+ 26 - 29
vendor/gopkg.in/yaml.v2/apic.go

@@ -2,7 +2,6 @@ package yaml
 
 import (
 	"io"
-	"os"
 )
 
 func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
@@ -48,9 +47,9 @@ func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err
 	return n, nil
 }
 
-// File read handler.
-func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
-	return parser.input_file.Read(buffer)
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	return parser.input_reader.Read(buffer)
 }
 
 // Set a string input.
@@ -64,12 +63,12 @@ func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
 }
 
 // Set a file input.
-func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
 	if parser.read_handler != nil {
 		panic("must set the input source only once")
 	}
-	parser.read_handler = yaml_file_read_handler
-	parser.input_file = file
+	parser.read_handler = yaml_reader_read_handler
+	parser.input_reader = r
 }
 
 // Set the source encoding.
@@ -81,14 +80,13 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
 }
 
 // Create a new emitter object.
-func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
 	*emitter = yaml_emitter_t{
 		buffer:     make([]byte, output_buffer_size),
 		raw_buffer: make([]byte, 0, output_raw_buffer_size),
 		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
 		events:     make([]yaml_event_t, 0, initial_queue_size),
 	}
-	return true
 }
 
 // Destroy an emitter object.
@@ -102,9 +100,10 @@ func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
 	return nil
 }
 
-// File write handler.
-func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
-	_, err := emitter.output_file.Write(buffer)
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	_, err := emitter.output_writer.Write(buffer)
 	return err
 }
 
@@ -118,12 +117,12 @@ func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]by
 }
 
 // Set a file output.
-func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
 	if emitter.write_handler != nil {
 		panic("must set the output target only once")
 	}
-	emitter.write_handler = yaml_file_write_handler
-	emitter.output_file = file
+	emitter.write_handler = yaml_writer_write_handler
+	emitter.output_writer = w
 }
 
 // Set the output encoding.
@@ -252,41 +251,41 @@ func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
 //
 
 // Create STREAM-START.
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
 	*event = yaml_event_t{
 		typ:      yaml_STREAM_START_EVENT,
 		encoding: encoding,
 	}
-	return true
 }
 
 // Create STREAM-END.
-func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
 	*event = yaml_event_t{
 		typ: yaml_STREAM_END_EVENT,
 	}
-	return true
 }
 
 // Create DOCUMENT-START.
-func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
-	tag_directives []yaml_tag_directive_t, implicit bool) bool {
+func yaml_document_start_event_initialize(
+	event *yaml_event_t,
+	version_directive *yaml_version_directive_t,
+	tag_directives []yaml_tag_directive_t,
+	implicit bool,
+) {
 	*event = yaml_event_t{
 		typ:               yaml_DOCUMENT_START_EVENT,
 		version_directive: version_directive,
 		tag_directives:    tag_directives,
 		implicit:          implicit,
 	}
-	return true
 }
 
 // Create DOCUMENT-END.
-func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
 	*event = yaml_event_t{
 		typ:      yaml_DOCUMENT_END_EVENT,
 		implicit: implicit,
 	}
-	return true
 }
 
 ///*
@@ -348,7 +347,7 @@ func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
 }
 
 // Create MAPPING-START.
-func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
 	*event = yaml_event_t{
 		typ:      yaml_MAPPING_START_EVENT,
 		anchor:   anchor,
@@ -356,15 +355,13 @@ func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte
 		implicit: implicit,
 		style:    yaml_style_t(style),
 	}
-	return true
 }
 
 // Create MAPPING-END.
-func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
 	*event = yaml_event_t{
 		typ: yaml_MAPPING_END_EVENT,
 	}
-	return true
 }
 
 // Destroy an event object.
@@ -471,7 +468,7 @@ func yaml_event_delete(event *yaml_event_t) {
 //    } context
 //    tag_directive *yaml_tag_directive_t
 //
-//    context.error = YAML_NO_ERROR // Eliminate a compliler warning.
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
 //
 //    assert(document) // Non-NULL document object is expected.
 //

+ 164 - 74
vendor/gopkg.in/yaml.v2/decode.go

@@ -4,6 +4,7 @@ import (
 	"encoding"
 	"encoding/base64"
 	"fmt"
+	"io"
 	"math"
 	"reflect"
 	"strconv"
@@ -22,19 +23,22 @@ type node struct {
 	kind         int
 	line, column int
 	tag          string
-	value        string
-	implicit     bool
-	children     []*node
-	anchors      map[string]*node
+	// For an alias node, alias holds the resolved alias.
+	alias    *node
+	value    string
+	implicit bool
+	children []*node
+	anchors  map[string]*node
 }
 
 // ----------------------------------------------------------------------------
 // Parser, produces a node tree out of a libyaml event stream.
 
 type parser struct {
-	parser yaml_parser_t
-	event  yaml_event_t
-	doc    *node
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *node
+	doneInit bool
 }
 
 func newParser(b []byte) *parser {
@@ -42,21 +46,30 @@ func newParser(b []byte) *parser {
 	if !yaml_parser_initialize(&p.parser) {
 		panic("failed to initialize YAML emitter")
 	}
-
 	if len(b) == 0 {
 		b = []byte{'\n'}
 	}
-
 	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
 
-	p.skip()
-	if p.event.typ != yaml_STREAM_START_EVENT {
-		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML emitter")
 	}
-	p.skip()
+	yaml_parser_set_input_reader(&p.parser, r)
 	return &p
 }
 
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
 func (p *parser) destroy() {
 	if p.event.typ != yaml_NO_EVENT {
 		yaml_event_delete(&p.event)
@@ -64,16 +77,35 @@ func (p *parser) destroy() {
 	yaml_parser_delete(&p.parser)
 }
 
-func (p *parser) skip() {
-	if p.event.typ != yaml_NO_EVENT {
-		if p.event.typ == yaml_STREAM_END_EVENT {
-			failf("attempted to go past the end of stream; corrupted value?")
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
 		}
-		yaml_event_delete(&p.event)
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
 	}
 	if !yaml_parser_parse(&p.parser, &p.event) {
 		p.fail()
 	}
+	return p.event.typ
 }
 
 func (p *parser) fail() {
@@ -81,6 +113,10 @@ func (p *parser) fail() {
 	var line int
 	if p.parser.problem_mark.line != 0 {
 		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
 	} else if p.parser.context_mark.line != 0 {
 		line = p.parser.context_mark.line
 	}
@@ -103,7 +139,8 @@ func (p *parser) anchor(n *node, anchor []byte) {
 }
 
 func (p *parser) parse() *node {
-	switch p.event.typ {
+	p.init()
+	switch p.peek() {
 	case yaml_SCALAR_EVENT:
 		return p.scalar()
 	case yaml_ALIAS_EVENT:
@@ -118,7 +155,7 @@ func (p *parser) parse() *node {
 		// Happens when attempting to decode an empty buffer.
 		return nil
 	default:
-		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+		panic("attempted to parse unknown event: " + p.event.typ.String())
 	}
 }
 
@@ -134,19 +171,20 @@ func (p *parser) document() *node {
 	n := p.node(documentNode)
 	n.anchors = make(map[string]*node)
 	p.doc = n
-	p.skip()
+	p.expect(yaml_DOCUMENT_START_EVENT)
 	n.children = append(n.children, p.parse())
-	if p.event.typ != yaml_DOCUMENT_END_EVENT {
-		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
-	}
-	p.skip()
+	p.expect(yaml_DOCUMENT_END_EVENT)
 	return n
 }
 
 func (p *parser) alias() *node {
 	n := p.node(aliasNode)
 	n.value = string(p.event.anchor)
-	p.skip()
+	n.alias = p.doc.anchors[n.value]
+	if n.alias == nil {
+		failf("unknown anchor '%s' referenced", n.value)
+	}
+	p.expect(yaml_ALIAS_EVENT)
 	return n
 }
 
@@ -156,29 +194,29 @@ func (p *parser) scalar() *node {
 	n.tag = string(p.event.tag)
 	n.implicit = p.event.implicit
 	p.anchor(n, p.event.anchor)
-	p.skip()
+	p.expect(yaml_SCALAR_EVENT)
 	return n
 }
 
 func (p *parser) sequence() *node {
 	n := p.node(sequenceNode)
 	p.anchor(n, p.event.anchor)
-	p.skip()
-	for p.event.typ != yaml_SEQUENCE_END_EVENT {
+	p.expect(yaml_SEQUENCE_START_EVENT)
+	for p.peek() != yaml_SEQUENCE_END_EVENT {
 		n.children = append(n.children, p.parse())
 	}
-	p.skip()
+	p.expect(yaml_SEQUENCE_END_EVENT)
 	return n
 }
 
 func (p *parser) mapping() *node {
 	n := p.node(mappingNode)
 	p.anchor(n, p.event.anchor)
-	p.skip()
-	for p.event.typ != yaml_MAPPING_END_EVENT {
+	p.expect(yaml_MAPPING_START_EVENT)
+	for p.peek() != yaml_MAPPING_END_EVENT {
 		n.children = append(n.children, p.parse(), p.parse())
 	}
-	p.skip()
+	p.expect(yaml_MAPPING_END_EVENT)
 	return n
 }
 
@@ -187,7 +225,7 @@ func (p *parser) mapping() *node {
 
 type decoder struct {
 	doc     *node
-	aliases map[string]bool
+	aliases map[*node]bool
 	mapType reflect.Type
 	terrors []string
 	strict  bool
@@ -198,11 +236,13 @@ var (
 	durationType   = reflect.TypeOf(time.Duration(0))
 	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
 	ifaceType      = defaultMapType.Elem()
+	timeType       = reflect.TypeOf(time.Time{})
+	ptrTimeType    = reflect.TypeOf(&time.Time{})
 )
 
 func newDecoder(strict bool) *decoder {
 	d := &decoder{mapType: defaultMapType, strict: strict}
-	d.aliases = make(map[string]bool)
+	d.aliases = make(map[*node]bool)
 	return d
 }
 
@@ -308,16 +348,13 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) {
 }
 
 func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
-	an, ok := d.doc.anchors[n.value]
-	if !ok {
-		failf("unknown anchor '%s' referenced", n.value)
-	}
-	if d.aliases[n.value] {
+	if d.aliases[n] {
+		// TODO this could actually be allowed in some circumstances.
 		failf("anchor '%s' value contains itself", n.value)
 	}
-	d.aliases[n.value] = true
-	good = d.unmarshal(an, out)
-	delete(d.aliases, n.value)
+	d.aliases[n] = true
+	good = d.unmarshal(n.alias, out)
+	delete(d.aliases, n)
 	return good
 }
 
@@ -329,7 +366,7 @@ func resetMap(out reflect.Value) {
 	}
 }
 
-func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
 	var tag string
 	var resolved interface{}
 	if n.tag == "" && !n.implicit {
@@ -353,9 +390,26 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 		}
 		return true
 	}
-	if s, ok := resolved.(string); ok && out.CanAddr() {
-		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
-			err := u.UnmarshalText([]byte(s))
+	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+		// We've resolved to exactly the type we want, so use that.
+		out.Set(resolvedv)
+		return true
+	}
+	// Perhaps we can use the value as a TextUnmarshaler to
+	// set its value.
+	if out.CanAddr() {
+		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+		if ok {
+			var text []byte
+			if tag == yaml_BINARY_TAG {
+				text = []byte(resolved.(string))
+			} else {
+				// We let any value be unmarshaled into TextUnmarshaler.
+				// That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should bowl out any dubious values.
+				text = []byte(n.value)
+			}
+			err := u.UnmarshalText(text)
 			if err != nil {
 				fail(err)
 			}
@@ -366,46 +420,54 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 	case reflect.String:
 		if tag == yaml_BINARY_TAG {
 			out.SetString(resolved.(string))
-			good = true
-		} else if resolved != nil {
+			return true
+		}
+		if resolved != nil {
 			out.SetString(n.value)
-			good = true
+			return true
 		}
 	case reflect.Interface:
 		if resolved == nil {
 			out.Set(reflect.Zero(out.Type()))
+		} else if tag == yaml_TIMESTAMP_TAG {
+			// It looks like a timestamp but for backward compatibility
+			// reasons we set it as a string, so that code that unmarshals
+			// timestamp-like values into interface{} will continue to
+			// see a string and not a time.Time.
+			// TODO(v3) Drop this.
+			out.Set(reflect.ValueOf(n.value))
 		} else {
 			out.Set(reflect.ValueOf(resolved))
 		}
-		good = true
+		return true
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 		switch resolved := resolved.(type) {
 		case int:
 			if !out.OverflowInt(int64(resolved)) {
 				out.SetInt(int64(resolved))
-				good = true
+				return true
 			}
 		case int64:
 			if !out.OverflowInt(resolved) {
 				out.SetInt(resolved)
-				good = true
+				return true
 			}
 		case uint64:
 			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
 				out.SetInt(int64(resolved))
-				good = true
+				return true
 			}
 		case float64:
 			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
 				out.SetInt(int64(resolved))
-				good = true
+				return true
 			}
 		case string:
 			if out.Type() == durationType {
 				d, err := time.ParseDuration(resolved)
 				if err == nil {
 					out.SetInt(int64(d))
-					good = true
+					return true
 				}
 			}
 		}
@@ -414,44 +476,49 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 		case int:
 			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		case int64:
 			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		case uint64:
 			if !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		case float64:
 			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
 				out.SetUint(uint64(resolved))
-				good = true
+				return true
 			}
 		}
 	case reflect.Bool:
 		switch resolved := resolved.(type) {
 		case bool:
 			out.SetBool(resolved)
-			good = true
+			return true
 		}
 	case reflect.Float32, reflect.Float64:
 		switch resolved := resolved.(type) {
 		case int:
 			out.SetFloat(float64(resolved))
-			good = true
+			return true
 		case int64:
 			out.SetFloat(float64(resolved))
-			good = true
+			return true
 		case uint64:
 			out.SetFloat(float64(resolved))
-			good = true
+			return true
 		case float64:
 			out.SetFloat(resolved)
-			good = true
+			return true
+		}
+	case reflect.Struct:
+		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+			out.Set(resolvedv)
+			return true
 		}
 	case reflect.Ptr:
 		if out.Type().Elem() == reflect.TypeOf(resolved) {
@@ -459,13 +526,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
 			elem := reflect.New(out.Type().Elem())
 			elem.Elem().Set(reflect.ValueOf(resolved))
 			out.Set(elem)
-			good = true
+			return true
 		}
 	}
-	if !good {
-		d.terror(n, tag, out)
-	}
-	return good
+	d.terror(n, tag, out)
+	return false
 }
 
 func settableValueOf(i interface{}) reflect.Value {
@@ -482,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
 	switch out.Kind() {
 	case reflect.Slice:
 		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Array:
+		if l != out.Len() {
+			failf("invalid array: want %d elements but got %d", out.Len(), l)
+		}
 	case reflect.Interface:
 		// No type hints. Will have to use a generic sequence.
 		iface = out
@@ -500,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
 			j++
 		}
 	}
-	out.Set(out.Slice(0, j))
+	if out.Kind() != reflect.Array {
+		out.Set(out.Slice(0, j))
+	}
 	if iface.IsValid() {
 		iface.Set(out)
 	}
@@ -561,7 +632,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
 			}
 			e := reflect.New(et).Elem()
 			if d.unmarshal(n.children[i+1], e) {
-				out.SetMapIndex(k, e)
+				d.setMapIndex(n.children[i+1], out, k, e)
 			}
 		}
 	}
@@ -569,6 +640,14 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
 	return true
 }
 
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+	if d.strict && out.MapIndex(k) != zeroValue {
+		d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+		return
+	}
+	out.SetMapIndex(k, v)
+}
+
 func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
 	outt := out.Type()
 	if outt.Elem() != mapItemType {
@@ -616,6 +695,10 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
 		elemType = inlineMap.Type().Elem()
 	}
 
+	var doneFields []bool
+	if d.strict {
+		doneFields = make([]bool, len(sinfo.FieldsList))
+	}
 	for i := 0; i < l; i += 2 {
 		ni := n.children[i]
 		if isMerge(ni) {
@@ -626,6 +709,13 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
 			continue
 		}
 		if info, ok := sinfo.FieldsMap[name.String()]; ok {
+			if d.strict {
+				if doneFields[info.Id] {
+					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+					continue
+				}
+				doneFields[info.Id] = true
+			}
 			var field reflect.Value
 			if info.Inline == nil {
 				field = out.Field(info.Num)
@@ -639,9 +729,9 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
 			}
 			value := reflect.New(elemType).Elem()
 			d.unmarshal(n.children[i+1], value)
-			inlineMap.SetMapIndex(name, value)
+			d.setMapIndex(n.children[i+1], inlineMap, name, value)
 		} else if d.strict {
-			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", n.line+1, name.String(), out.Type()))
+			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
 		}
 	}
 	return true
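The decoder changes above add duplicate-key and duplicate-field detection when the strict flag is set. A small sketch of the observable difference, assuming this vendored revision exposes yaml.UnmarshalStrict as the entry point that sets that flag:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type config struct {
	Host string `yaml:"host"`
	Port int    `yaml:"port"`
}

func main() {
	// The second "host" key is silently last-one-wins with Unmarshal,
	// but is reported as an error by the strict decoder.
	data := []byte("host: a\nport: 389\nhost: b\n")

	var c config
	if err := yaml.UnmarshalStrict(data, &c); err != nil {
		fmt.Println("strict error:", err)
	}

	if err := yaml.Unmarshal(data, &c); err == nil {
		fmt.Printf("lenient result: %+v\n", c)
	}
}
```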

+ 6 - 5
vendor/gopkg.in/yaml.v2/emitterc.go

@@ -2,6 +2,7 @@ package yaml
 
 import (
 	"bytes"
+	"fmt"
 )
 
 // Flush the buffer if needed.
@@ -664,7 +665,7 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
 		return yaml_emitter_emit_mapping_start(emitter, event)
 	default:
 		return yaml_emitter_set_emitter_error(emitter,
-			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
 	}
 }
 
@@ -842,7 +843,7 @@ func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event
 	return true
 }
 
-// Write an achor.
+// Write an anchor.
 func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
 	if emitter.anchor_data.anchor == nil {
 		return true
@@ -995,9 +996,9 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
 		space_break    = false
 
 		preceded_by_whitespace = false
-		followed_by_whitespace  = false
-		previous_space          = false
-		previous_break          = false
+		followed_by_whitespace = false
+		previous_space         = false
+		previous_break         = false
 	)
 
 	emitter.scalar_data.value = value

+ 124 - 40
vendor/gopkg.in/yaml.v2/encode.go

@@ -3,38 +3,67 @@ package yaml
 import (
 	"encoding"
 	"fmt"
+	"io"
 	"reflect"
 	"regexp"
 	"sort"
 	"strconv"
 	"strings"
 	"time"
+	"unicode/utf8"
 )
 
+// jsonNumber is the interface of the encoding/json.Number datatype.
+// Repeating the interface here avoids a dependency on encoding/json, and also
+// supports other libraries like jsoniter, which use a similar datatype with
+// the same interface. Detecting this interface is useful when dealing with
+// structures containing json.Number, which is a string under the hood. The
+// encoder should prefer the use of Int64(), Float64() and string(), in that
+// order, when encoding this type.
+type jsonNumber interface {
+	Float64() (float64, error)
+	Int64() (int64, error)
+	String() string
+}
+
 type encoder struct {
 	emitter yaml_emitter_t
 	event   yaml_event_t
 	out     []byte
 	flow    bool
+	// doneInit holds whether the initial stream_start_event has been
+	// emitted.
+	doneInit bool
 }
 
-func newEncoder() (e *encoder) {
-	e = &encoder{}
-	e.must(yaml_emitter_initialize(&e.emitter))
+func newEncoder() *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
 	yaml_emitter_set_output_string(&e.emitter, &e.out)
 	yaml_emitter_set_unicode(&e.emitter, true)
-	e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
-	e.emit()
-	e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
-	e.emit()
 	return e
 }
 
-func (e *encoder) finish() {
-	e.must(yaml_document_end_event_initialize(&e.event, true))
+func newEncoderWithWriter(w io.Writer) *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_writer(&e.emitter, w)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func (e *encoder) init() {
+	if e.doneInit {
+		return
+	}
+	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
 	e.emit()
+	e.doneInit = true
+}
+
+func (e *encoder) finish() {
 	e.emitter.open_ended = false
-	e.must(yaml_stream_end_event_initialize(&e.event))
+	yaml_stream_end_event_initialize(&e.event)
 	e.emit()
 }
 
@@ -44,9 +73,7 @@ func (e *encoder) destroy() {
 
 func (e *encoder) emit() {
 	// This will internally delete the e.event value.
-	if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
-		e.must(false)
-	}
+	e.must(yaml_emitter_emit(&e.emitter, &e.event))
 }
 
 func (e *encoder) must(ok bool) {
@@ -59,13 +86,43 @@ func (e *encoder) must(ok bool) {
 	}
 }
 
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+	e.init()
+	yaml_document_start_event_initialize(&e.event, nil, nil, true)
+	e.emit()
+	e.marshal(tag, in)
+	yaml_document_end_event_initialize(&e.event, true)
+	e.emit()
+}
+
 func (e *encoder) marshal(tag string, in reflect.Value) {
-	if !in.IsValid() {
+	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
 		e.nilv()
 		return
 	}
 	iface := in.Interface()
-	if m, ok := iface.(Marshaler); ok {
+	switch m := iface.(type) {
+	case jsonNumber:
+		integer, err := m.Int64()
+		if err == nil {
+			// In this case the json.Number is a valid int64
+			in = reflect.ValueOf(integer)
+			break
+		}
+		float, err := m.Float64()
+		if err == nil {
+			// In this case the json.Number is a valid float64
+			in = reflect.ValueOf(float)
+			break
+		}
+		// fallback case - no number could be obtained
+		in = reflect.ValueOf(m.String())
+	case time.Time, *time.Time:
+		// Although time.Time implements TextMarshaler,
+		// we don't want to treat it as a string for YAML
+		// purposes because YAML has special support for
+		// timestamps.
+	case Marshaler:
 		v, err := m.MarshalYAML()
 		if err != nil {
 			fail(err)
@@ -75,31 +132,34 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
 			return
 		}
 		in = reflect.ValueOf(v)
-	} else if m, ok := iface.(encoding.TextMarshaler); ok {
+	case encoding.TextMarshaler:
 		text, err := m.MarshalText()
 		if err != nil {
 			fail(err)
 		}
 		in = reflect.ValueOf(string(text))
+	case nil:
+		e.nilv()
+		return
 	}
 	switch in.Kind() {
 	case reflect.Interface:
-		if in.IsNil() {
-			e.nilv()
-		} else {
-			e.marshal(tag, in.Elem())
-		}
+		e.marshal(tag, in.Elem())
 	case reflect.Map:
 		e.mapv(tag, in)
 	case reflect.Ptr:
-		if in.IsNil() {
-			e.nilv()
+		if in.Type() == ptrTimeType {
+			e.timev(tag, in.Elem())
 		} else {
 			e.marshal(tag, in.Elem())
 		}
 	case reflect.Struct:
-		e.structv(tag, in)
-	case reflect.Slice:
+		if in.Type() == timeType {
+			e.timev(tag, in)
+		} else {
+			e.structv(tag, in)
+		}
+	case reflect.Slice, reflect.Array:
 		if in.Type().Elem() == mapItemType {
 			e.itemsv(tag, in)
 		} else {
@@ -191,10 +251,10 @@ func (e *encoder) mappingv(tag string, f func()) {
 		e.flow = false
 		style = yaml_FLOW_MAPPING_STYLE
 	}
-	e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
 	e.emit()
 	f()
-	e.must(yaml_mapping_end_event_initialize(&e.event))
+	yaml_mapping_end_event_initialize(&e.event)
 	e.emit()
 }
 
@@ -240,23 +300,36 @@ var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0
 func (e *encoder) stringv(tag string, in reflect.Value) {
 	var style yaml_scalar_style_t
 	s := in.String()
-	rtag, rs := resolve("", s)
-	if rtag == yaml_BINARY_TAG {
-		if tag == "" || tag == yaml_STR_TAG {
-			tag = rtag
-			s = rs.(string)
-		} else if tag == yaml_BINARY_TAG {
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == yaml_BINARY_TAG {
 			failf("explicitly tagged !!binary data must be base64-encoded")
-		} else {
+		}
+		if tag != "" {
 			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
 		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = yaml_BINARY_TAG
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
 	}
-	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
-		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-	} else if strings.Contains(s, "\n") {
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
 		style = yaml_LITERAL_SCALAR_STYLE
-	} else {
+	case canUsePlain:
 		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
 	}
 	e.emitScalar(s, "", tag, style)
 }
@@ -281,9 +354,20 @@ func (e *encoder) uintv(tag string, in reflect.Value) {
 	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
 }
 
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
 func (e *encoder) floatv(tag string, in reflect.Value) {
-	// FIXME: Handle 64 bits here.
-	s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
 	switch s {
 	case "+Inf":
 		s = ".inf"
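The encoder now special-cases values implementing the jsonNumber interface (Int64, Float64, String), so an encoding/json.Number is emitted as a YAML number rather than a quoted string. A minimal sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// json.Number satisfies the jsonNumber interface checked by the encoder,
	// so these values come out as plain YAML numbers.
	doc := map[string]interface{}{
		"count": json.Number("42"),
		"ratio": json.Number("0.75"),
	}
	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // count: 42 / ratio: 0.75
}
```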

+ 5 - 0
vendor/gopkg.in/yaml.v2/go.mod

@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v2"
+
+require (
+	"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)

+ 19 - 1
vendor/gopkg.in/yaml.v2/readerc.go

@@ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
 		panic("read handler must be set")
 	}
 
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above
+	// implies that this should be the case, and there are tests that rely on it.
+
 	// If the EOF flag is set and the raw buffer is empty, do nothing.
 	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
-		return true
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length is either a) panicking or b) accessing invalid memory.
+		//return true
 	}
 
 	// Return if the buffer contains enough characters.
@@ -389,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
 			break
 		}
 	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length is either a) panicking or b) accessing invalid memory.
+	// This happens here due to the EOF above breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
 	parser.buffer = parser.buffer[:buffer_len]
 	return true
 }

+ 65 - 15
vendor/gopkg.in/yaml.v2/resolve.go

@@ -6,7 +6,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
-	"unicode/utf8"
+	"time"
 )
 
 type resolveMapItem struct {
@@ -75,7 +75,7 @@ func longTag(tag string) string {
 
 func resolvableTag(tag string) bool {
 	switch tag {
-	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
 		return true
 	}
 	return false
@@ -92,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 		switch tag {
 		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
 			return
+		case yaml_FLOAT_TAG:
+			if rtag == yaml_INT_TAG {
+				switch v := out.(type) {
+				case int64:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				case int:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				}
+			}
 		}
 		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
 	}()
@@ -125,6 +138,15 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 
 		case 'D', 'S':
 			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == yaml_TIMESTAMP_TAG {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return yaml_TIMESTAMP_TAG, t
+				}
+			}
+
 			plain := strings.Replace(in, "_", "", -1)
 			intv, err := strconv.ParseInt(plain, 0, 64)
 			if err == nil {
@@ -158,28 +180,20 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 					return yaml_INT_TAG, uintv
 				}
 			} else if strings.HasPrefix(plain, "-0b") {
-				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
 				if err == nil {
-					if intv == int64(int(intv)) {
-						return yaml_INT_TAG, -int(intv)
+					if true || intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
 					} else {
-						return yaml_INT_TAG, -intv
+						return yaml_INT_TAG, intv
 					}
 				}
 			}
-			// XXX Handle timestamps here.
-
 		default:
 			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
 		}
 	}
-	if tag == yaml_BINARY_TAG {
-		return yaml_BINARY_TAG, in
-	}
-	if utf8.ValidString(in) {
-		return yaml_STR_TAG, in
-	}
-	return yaml_BINARY_TAG, encodeBase64(in)
+	return yaml_STR_TAG, in
 }
 
 // encodeBase64 encodes s as base64 that is broken up into multiple lines
@@ -206,3 +220,39 @@ func encodeBase64(s string) string {
 	}
 	return string(out[:k])
 }
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
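For reference, a stand-alone sketch of the layout-probing approach used by parseTimestamp above, with the same four layouts copied from the diff; parseYAMLTimestamp is an illustrative name, not part of the package API:

```go
package main

import (
	"fmt"
	"time"
)

// The permitted layouts, as in the allowedTimestampFormats list above.
var layouts = []string{
	"2006-1-2T15:4:5.999999999Z07:00",
	"2006-1-2t15:4:5.999999999Z07:00",
	"2006-1-2 15:4:5.999999999",
	"2006-1-2",
}

// parseYAMLTimestamp tries each layout in turn and reports whether any of
// them parses s.
func parseYAMLTimestamp(s string) (time.Time, bool) {
	for _, layout := range layouts {
		if t, err := time.Parse(layout, s); err == nil {
			return t, true
		}
	}
	return time.Time{}, false
}

func main() {
	t, ok := parseYAMLTimestamp("2001-12-14") // date-only form
	fmt.Println(t, ok)
}
```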

+ 7 - 22
vendor/gopkg.in/yaml.v2/scannerc.go

@@ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
 
 	required := parser.flow_level == 0 && parser.indent == parser.mark.column
 
-	// A simple key is required only when it is the first token in the current
-	// line.  Therefore it is always allowed.  But we add a check anyway.
-	if required && !parser.simple_key_allowed {
-		panic("should not happen")
-	}
-
 	//
 	// If the current position may start a simple key, save it.
 	//
@@ -2475,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
 			}
 		}
 
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
 		// Check if we are at the end of the scalar.
 		if single {
 			if parser.buffer[parser.buffer_pos] == '\'' {
@@ -2487,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
 		}
 
 		// Consume blank characters.
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-
 		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
 			if is_blank(parser.buffer, parser.buffer_pos) {
 				// Consume a space or a tab character.
@@ -2592,19 +2586,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
 		// Consume non-blank characters.
 		for !is_blankz(parser.buffer, parser.buffer_pos) {
 
-			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
-			if parser.flow_level > 0 &&
-				parser.buffer[parser.buffer_pos] == ':' &&
-				!is_blankz(parser.buffer, parser.buffer_pos+1) {
-				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-					start_mark, "found unexpected ':'")
-				return false
-			}
-
 			// Check for indicators that may end a plain scalar.
 			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
 				(parser.flow_level > 0 &&
-					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+					(parser.buffer[parser.buffer_pos] == ',' ||
 						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
 						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
 						parser.buffer[parser.buffer_pos] == '}')) {
@@ -2656,10 +2641,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
 		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
 			if is_blank(parser.buffer, parser.buffer_pos) {
 
-				// Check for tab character that abuse indentation.
+				// Check for tab characters that abuse indentation.
 				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
 					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-						start_mark, "found a tab character that violate indentation")
+						start_mark, "found a tab character that violates indentation")
 					return false
 				}
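With the flow-context ':' restriction dropped above, a plain (unquoted) scalar containing colons, a URL being the common case, should now scan inside a flow mapping. A hedged usage sketch, assuming the vendored yaml.v2 from this commit:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// The ':' after "https" is no longer rejected by the plain-scalar
	// scanner in flow context, so this should unmarshal cleanly.
	var doc map[string]map[string]string
	data := []byte(`repo: {url: https://github.com/go-yaml/yaml}`)
	if err := yaml.Unmarshal(data, &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc["repo"]["url"]) // https://github.com/go-yaml/yaml
}
```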
 

+ 9 - 0
vendor/gopkg.in/yaml.v2/sorter.go

@@ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool {
 		}
 		var ai, bi int
 		var an, bn int64
+		if ar[i] == '0' || br[i] == '0' {
+			for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+				if ar[j] != '0' {
+					an = 1
+					bn = 1
+					break
+				}
+			}
+		}
 		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
 			an = an*10 + int64(ar[ai]-'0')
 		}
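This comparison tweak feeds into the numeric-aware ordering that yaml.v2 applies to map keys on output: runs of digits are compared as numbers rather than character by character. A small illustration (the exact behaviour of the leading-zero edge cases is best verified against the library itself):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Map keys are sorted when marshalling; digit runs compare numerically,
	// so "item2" sorts before "item10" rather than after it.
	out, err := yaml.Marshal(map[string]int{
		"item10": 3,
		"item2":  1,
		"item9":  2,
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// item2: 1
	// item9: 2
	// item10: 3
}
```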

+ 1 - 64
vendor/gopkg.in/yaml.v2/writerc.go

@@ -18,72 +18,9 @@ func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
 		return true
 	}
 
-	// If the output encoding is UTF-8, we don't need to recode the buffer.
-	if emitter.encoding == yaml_UTF8_ENCODING {
-		if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
-			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
-		}
-		emitter.buffer_pos = 0
-		return true
-	}
-
-	// Recode the buffer into the raw buffer.
-	var low, high int
-	if emitter.encoding == yaml_UTF16LE_ENCODING {
-		low, high = 0, 1
-	} else {
-		high, low = 1, 0
-	}
-
-	pos := 0
-	for pos < emitter.buffer_pos {
-		// See the "reader.c" code for more details on UTF-8 encoding.  Note
-		// that we assume that the buffer contains a valid UTF-8 sequence.
-
-		// Read the next UTF-8 character.
-		octet := emitter.buffer[pos]
-
-		var w int
-		var value rune
-		switch {
-		case octet&0x80 == 0x00:
-			w, value = 1, rune(octet&0x7F)
-		case octet&0xE0 == 0xC0:
-			w, value = 2, rune(octet&0x1F)
-		case octet&0xF0 == 0xE0:
-			w, value = 3, rune(octet&0x0F)
-		case octet&0xF8 == 0xF0:
-			w, value = 4, rune(octet&0x07)
-		}
-		for k := 1; k < w; k++ {
-			octet = emitter.buffer[pos+k]
-			value = (value << 6) + (rune(octet) & 0x3F)
-		}
-		pos += w
-
-		// Write the character.
-		if value < 0x10000 {
-			var b [2]byte
-			b[high] = byte(value >> 8)
-			b[low] = byte(value & 0xFF)
-			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
-		} else {
-			// Write the character using a surrogate pair (check "reader.c").
-			var b [4]byte
-			value -= 0x10000
-			b[high] = byte(0xD8 + (value >> 18))
-			b[low] = byte((value >> 10) & 0xFF)
-			b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
-			b[low+2] = byte(value & 0xFF)
-			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
-		}
-	}
-
-	// Write the raw buffer.
-	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
 		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
 	}
 	emitter.buffer_pos = 0
-	emitter.raw_buffer = emitter.raw_buffer[:0]
 	return true
 }
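A minimal sketch of the flush path that remains after this change: with UTF-8 as the only supported output encoding, the working buffer is written straight to the writer and reset, with no UTF-16 recoding pass. The type and field names below are illustrative only, not the emitter's real structure:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// bufferedEmitter stands in for the emitter state: a working buffer plus the
// writer it flushes to.
type bufferedEmitter struct {
	w   io.Writer
	buf []byte
	pos int
}

// flush writes the filled part of the buffer as-is (it is already UTF-8) and
// resets the position, mirroring the simplified yaml_emitter_flush above.
func (e *bufferedEmitter) flush() error {
	if e.pos == 0 {
		return nil
	}
	if _, err := e.w.Write(e.buf[:e.pos]); err != nil {
		return fmt.Errorf("write error: %v", err)
	}
	e.pos = 0
	return nil
}

func main() {
	e := &bufferedEmitter{w: os.Stdout, buf: []byte("key: value\n"), pos: 11}
	if err := e.flush(); err != nil {
		panic(err)
	}
}
```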

+ 116 - 7
vendor/gopkg.in/yaml.v2/yaml.go

@@ -9,6 +9,7 @@ package yaml
 import (
 	"errors"
 	"fmt"
+	"io"
 	"reflect"
 	"strings"
 	"sync"
@@ -81,12 +82,58 @@ func Unmarshal(in []byte, out interface{}) (err error) {
 }
 
 // UnmarshalStrict is like Unmarshal except that any fields that are found
-// in the data that do not have corresponding struct members will result in
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in
 // an error.
 func UnmarshalStrict(in []byte, out interface{}) (err error) {
 	return unmarshal(in, out, true)
 }
 
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+	dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder(dec.strict)
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
 func unmarshal(in []byte, out interface{}, strict bool) (err error) {
 	defer handleErr(&err)
 	d := newDecoder(strict)
@@ -110,8 +157,8 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
 // of the generated document will reflect the structure of the value itself.
 // Maps and pointers (to struct, string, int, etc) are accepted as the in value.
 //
-// Struct fields are only unmarshalled if they are exported (have an upper case
-// first letter), and are unmarshalled using the field name lowercased as the
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
 // default key. Custom keys may be defined via the "yaml" name in the field
 // tag: the content preceding the first comma is used as the key, and the
 // following comma-separated options are used to tweak the marshalling process.
@@ -125,7 +172,10 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
 //
 //     omitempty    Only include the field if it's not set to the zero
 //                  value for the type or to empty slices or maps.
-//                  Does not apply to zero valued structs.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be excluded if that method returns true.
 //
 //     flow         Marshal using a flow style (useful for structs,
 //                  sequences and maps).
@@ -150,12 +200,47 @@ func Marshal(in interface{}) (out []byte, err error) {
 	defer handleErr(&err)
 	e := newEncoder()
 	defer e.destroy()
-	e.marshal("", reflect.ValueOf(in))
+	e.marshalDoc("", reflect.ValueOf(in))
 	e.finish()
 	out = e.out
 	return
 }
 
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
 func handleErr(err *error) {
 	if v := recover(); v != nil {
 		if e, ok := v.(yamlError); ok {
@@ -211,6 +296,9 @@ type fieldInfo struct {
 	Num       int
 	OmitEmpty bool
 	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
 
 	// Inline holds the field index if the field is part of an inlined struct.
 	Inline []int
@@ -290,6 +378,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
 					} else {
 						finfo.Inline = append([]int{i}, finfo.Inline...)
 					}
+					finfo.Id = len(fieldsList)
 					fieldsMap[finfo.Key] = finfo
 					fieldsList = append(fieldsList, finfo)
 				}
@@ -311,11 +400,16 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
 			return nil, errors.New(msg)
 		}
 
+		info.Id = len(fieldsList)
 		fieldsList = append(fieldsList, info)
 		fieldsMap[info.Key] = info
 	}
 
-	sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+	sinfo = &structInfo{
+		FieldsMap:  fieldsMap,
+		FieldsList: fieldsList,
+		InlineMap:  inlineMap,
+	}
 
 	fieldMapMutex.Lock()
 	structMap[st] = sinfo
@@ -323,8 +417,23 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
 	return sinfo, nil
 }
 
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+	IsZero() bool
+}
+
 func isZero(v reflect.Value) bool {
-	switch v.Kind() {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
 	case reflect.String:
 		return len(v.String()) == 0
 	case reflect.Interface, reflect.Ptr:
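The new Decoder and Encoder turn the package into a streaming API. A usage sketch based only on the signatures added above (NewDecoder, SetStrict, Decode, NewEncoder, Encode, Close):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Decode a multi-document stream and re-encode it; on output the second
	// and later documents are preceded by a "---" separator.
	dec := yaml.NewDecoder(strings.NewReader("name: alpha\n---\nname: beta\n"))
	dec.SetStrict(true) // unknown fields and duplicate keys become errors

	enc := yaml.NewEncoder(os.Stdout)
	defer enc.Close()

	for {
		var doc struct {
			Name string `yaml:"name"`
		}
		err := dec.Decode(&doc)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Fprintf(os.Stderr, "decoded %q\n", doc.Name)
		if err := enc.Encode(doc); err != nil {
			panic(err)
		}
	}
}
```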

+ 26 - 4
vendor/gopkg.in/yaml.v2/yamlh.go

@@ -1,6 +1,7 @@
 package yaml
 
 import (
+	"fmt"
 	"io"
 )
 
@@ -239,6 +240,27 @@ const (
 	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
 )
 
+var eventStrings = []string{
+	yaml_NO_EVENT:             "none",
+	yaml_STREAM_START_EVENT:   "stream start",
+	yaml_STREAM_END_EVENT:     "stream end",
+	yaml_DOCUMENT_START_EVENT: "document start",
+	yaml_DOCUMENT_END_EVENT:   "document end",
+	yaml_ALIAS_EVENT:          "alias",
+	yaml_SCALAR_EVENT:         "scalar",
+	yaml_SEQUENCE_START_EVENT: "sequence start",
+	yaml_SEQUENCE_END_EVENT:   "sequence end",
+	yaml_MAPPING_START_EVENT:  "mapping start",
+	yaml_MAPPING_END_EVENT:    "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+	if e < 0 || int(e) >= len(eventStrings) {
+		return fmt.Sprintf("unknown event %d", e)
+	}
+	return eventStrings[e]
+}
+
 // The event structure.
 type yaml_event_t struct {
 
@@ -521,9 +543,9 @@ type yaml_parser_t struct {
 
 	read_handler yaml_read_handler_t // Read handler.
 
-	input_file io.Reader // File input data.
-	input      []byte    // String input data.
-	input_pos  int
+	input_reader io.Reader // File input data.
+	input        []byte    // String input data.
+	input_pos    int
 
 	eof bool // EOF flag
 
@@ -632,7 +654,7 @@ type yaml_emitter_t struct {
 	write_handler yaml_write_handler_t // Write handler.
 
 	output_buffer *[]byte   // String output data.
-	output_file   io.Writer // File output data.
+	output_writer io.Writer // File output data.
 
 	buffer     []byte // The working buffer.
 	buffer_pos int    // The current position of the buffer.