From 47687cc5b191201ad64fc944e0eca84f036d90bf Mon Sep 17 00:00:00 2001
From: Minho Hello Goodbye List List List List List Nested list ####### Header 7 #Header 1 ##Header 2 ###Header 3 ####Header 4 #####Header 5 ######Header 6 #######Header 7 Hello Goodbye List List List Nested list } Hello Goodbye List List List List List Nested list List List List List List Nested list Hello Goodbye List List List List List Nested list Hello Goodbye List List List List List Nested list Paragraph Paragraph ======== Paragraph ===== Paragraph Paragraph Paragraph ===== - -- * ** _ __ -*- -----* Hello Yin Yang Ting Bong Goo Yin Yang Ting Bong Goo Yin Yang Ting Bong Goo *Hello Paragraph\n* No linebreak Paragraph List List\nSecond line List Continued List List List normal text Foo Foo Yin Yang Ting Bong Goo 1 Hello 1.Hello Paragraph\n1. No linebreak Paragraph List List\nSecond line List Continued List List Foo Definition a Definition b Definition a Definition b Definition c Term 1\n:Definition a Definition a Definition b Text 1 Definition a Text 1 Definition b Text 2 Definition a Text 1
", -1)
- m.PrivatelyOwned = book.PrivatelyOwned
- m.PrivateToken = book.PrivateToken
- m.DocCount = book.DocCount
- m.CommentStatus = book.CommentStatus
- m.CommentCount = book.CommentCount
- m.CreateTime = book.CreateTime
- m.ModifyTime = book.ModifyTime
- m.Cover = book.Cover
- m.Label = book.Label
- m.Status = book.Status
- m.Editor = book.Editor
- m.Theme = book.Theme
+ m.BookId = book.BookId
+ m.BookName = book.BookName
+ m.Identify = book.Identify
+ m.OrderIndex = book.OrderIndex
+ m.Description = strings.Replace(book.Description, "\r\n", "
", -1)
+ m.PrivatelyOwned = book.PrivatelyOwned
+ m.PrivateToken = book.PrivateToken
+ m.DocCount = book.DocCount
+ m.CommentStatus = book.CommentStatus
+ m.CommentCount = book.CommentCount
+ m.CreateTime = book.CreateTime
+ m.ModifyTime = book.ModifyTime
+ m.Cover = book.Cover
+ m.Label = book.Label
+ m.Status = book.Status
+ m.Editor = book.Editor
+ m.Theme = book.Theme
-
- if book.Theme == ""{
+ if book.Theme == "" {
m.Theme = "default"
}
if book.Editor == "" {
@@ -336,61 +384,13 @@ func (book *Book) ToBookResult() *BookResult {
}
//重置文档数量
-func (m *Book) ResetDocumentNumber(book_id int) {
+func (m *Book) ResetDocumentNumber(book_id int) {
o := orm.NewOrm()
- totalCount,err := o.QueryTable(NewDocument().TableNameWithPrefix()).Filter("book_id",book_id).Count()
+ totalCount, err := o.QueryTable(NewDocument().TableNameWithPrefix()).Filter("book_id", book_id).Count()
if err == nil {
- o.Raw("UPDATE md_books SET doc_count = ? WHERE book_id = ?",int(totalCount),book_id).Exec()
- }else{
+ o.Raw("UPDATE md_books SET doc_count = ? WHERE book_id = ?", int(totalCount), book_id).Exec()
+ } else {
beego.Error(err)
}
}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/models/document.go b/models/document.go
index e4a149e3..f20767a3 100644
--- a/models/document.go
+++ b/models/document.go
@@ -155,7 +155,7 @@ func (m *Document) ReleaseContent(book_id int) {
func (m *Document) FindListByBookId(book_id int) (docs []*Document, err error) {
o := orm.NewOrm()
- _, err = o.QueryTable(m.TableNameWithPrefix()).Filter("book_id", book_id).All(&docs)
+ _, err = o.QueryTable(m.TableNameWithPrefix()).Filter("book_id", book_id).OrderBy("order_sort").All(&docs)
return
}
diff --git a/models/label.go b/models/label.go
new file mode 100644
index 00000000..7f1aa8ae
--- /dev/null
+++ b/models/label.go
@@ -0,0 +1,92 @@
+package models
+
+import (
+ "github.com/lifei6671/mindoc/conf"
+ "github.com/astaxie/beego/orm"
+ "strings"
+)
+
+type Label struct {
+ LabelId int `orm:"column(label_id);pk;auto;unique;" json:"label_id"`
+ LabelName string `orm:"column(label_name);size(50);unique" json:"label_name"`
+ BookNumber int `orm:"column(book_number)" json:"book_number"`
+}
+
+// TableName 获取对应数据库表名.
+func (m *Label) TableName() string {
+ return "label"
+}
+// TableEngine 获取数据使用的引擎.
+func (m *Label) TableEngine() string {
+ return "INNODB"
+}
+
+func (m *Label)TableNameWithPrefix() string {
+ return conf.GetDatabasePrefix() + m.TableName()
+}
+
+func NewLabel() *Label {
+ return &Label{}
+}
+
+func (m *Label) FindFirst(field string, value interface{}) (*Label,error){
+ o := orm.NewOrm()
+
+ err := o.QueryTable(m.TableNameWithPrefix()).Filter(field, value).One(m)
+
+ return m, err
+}
+
+//插入或更新标签.
+func (m *Label) InsertOrUpdate(labelName string) error {
+ o := orm.NewOrm()
+
+ err := o.QueryTable(m.TableNameWithPrefix()).Filter("label_name",labelName).One(m)
+ if err != nil && err != orm.ErrNoRows {
+ return err
+ }
+ count,_ := o.QueryTable(NewBook().TableNameWithPrefix()).Filter("label__icontains",labelName).Count()
+ m.BookNumber = int(count)
+ m.LabelName = labelName
+
+ if err == orm.ErrNoRows {
+ err = nil
+ m.LabelName = labelName
+ _,err = o.Insert(m)
+ }else{
+ _,err = o.Update(m)
+ }
+ return err
+}
+
+//批量插入或更新标签.
+func (m *Label) InsertOrUpdateMulti(labels string) {
+ if labels != "" {
+ labelArray := strings.Split(labels, ",")
+
+ for _, label := range labelArray {
+ if label != "" {
+ NewLabel().InsertOrUpdate(label)
+ }
+ }
+ }
+}
+
+//分页查找标签.
+func (m *Label) FindToPager(pageIndex, pageSize int) (labels []*Label,totalCount int,err error) {
+ o := orm.NewOrm()
+
+ count,err := o.QueryTable(m.TableNameWithPrefix()).Count()
+
+ if err != nil {
+ return
+ }
+ totalCount = int(count)
+
+ offset := (pageIndex - 1) * pageSize
+
+ _,err = o.QueryTable(m.TableNameWithPrefix()).OrderBy("-book_number").Offset(offset).Limit(pageSize).All(&labels)
+
+ return
+}
+
diff --git a/models/member.go b/models/member.go
index e9f79f5d..6bffe8f2 100644
--- a/models/member.go
+++ b/models/member.go
@@ -217,6 +217,7 @@ func (m *Member) ResolveRoleName() {
}
}
+//根据账号查找用户.
func (m *Member) FindByAccount(account string) (*Member, error) {
o := orm.NewOrm()
@@ -228,6 +229,7 @@ func (m *Member) FindByAccount(account string) (*Member, error) {
return m, err
}
+//分页查找用户.
func (m *Member) FindToPager(pageIndex, pageSize int) ([]*Member, int64, error) {
o := orm.NewOrm()
@@ -260,6 +262,7 @@ func (c *Member) IsAdministrator() bool {
return c.Role == 0 || c.Role == 1
}
+//根据指定字段查找用户.
func (m *Member) FindByFieldFirst(field string, value interface{}) (*Member, error) {
o := orm.NewOrm()
@@ -268,6 +271,7 @@ func (m *Member) FindByFieldFirst(field string, value interface{}) (*Member, err
return m, err
}
+//校验用户.
func (m *Member) Valid(is_hash_password bool) error {
//邮箱不能为空
@@ -324,6 +328,67 @@ func (m *Member) Valid(is_hash_password bool) error {
return nil
}
+//删除一个用户.
+
+func (m *Member) Delete(oldId int,newId int) error {
+ o := orm.NewOrm()
+
+ err := o.Begin()
+
+ if err != nil {
+ return err
+ }
+
+ _,err = o.Raw("DELETE FROM md_members WHERE member_id = ?",oldId).Exec()
+ if err != nil {
+ o.Rollback()
+ return err
+ }
+ _,err = o.Raw("UPDATE md_attachment SET `create_at` = ? WHERE `create_at` = ?",newId,oldId).Exec()
+
+ if err != nil {
+ o.Rollback()
+ return err
+ }
+
+ _,err = o.Raw("UPDATE md_books SET member_id = ? WHERE member_id = ?",newId,oldId).Exec()
+ if err != nil {
+ o.Rollback()
+ return err
+ }
+ _,err = o.Raw("UPDATE md_document_history SET member_id=? WHERE member_id = ?",newId,oldId).Exec()
+ if err != nil {
+ o.Rollback()
+ return err
+ }
+ _,err = o.Raw("UPDATE md_document_history SET modify_at=? WHERE modify_at = ?",newId,oldId).Exec()
+ if err != nil {
+ o.Rollback()
+ return err
+ }
+ _,err = o.Raw("UPDATE md_documents SET member_id = ? WHERE member_id = ?;",newId,oldId).Exec()
+ if err != nil {
+ o.Rollback()
+ return err
+ }
+ _,err = o.Raw("UPDATE md_documents SET modify_at = ? WHERE modify_at = ?",newId,oldId).Exec()
+ if err != nil {
+ o.Rollback()
+ return err
+ }
+ _,err = o.Raw("UPDATE md_relationship SET member_id = ? WHERE member_id = ?",newId,oldId).Exec()
+ if err != nil {
+ o.Rollback()
+ return err
+ }
+ if err = o.Commit();err != nil {
+ o.Rollback()
+ return err
+ }
+ return nil
+}
+
+
diff --git a/routers/filter.go b/routers/filter.go
index da34de98..b9811682 100644
--- a/routers/filter.go
+++ b/routers/filter.go
@@ -5,6 +5,7 @@ import (
"github.com/astaxie/beego/context"
"github.com/lifei6671/mindoc/conf"
"github.com/lifei6671/mindoc/models"
+ "encoding/json"
)
func init() {
@@ -12,7 +13,17 @@ func init() {
_, ok := ctx.Input.Session(conf.LoginSessionName).(models.Member)
if !ok {
- ctx.Redirect(302, beego.URLFor("AccountController.Login"))
+ if ctx.Input.IsAjax() {
+ jsonData := make(map[string]interface{},3)
+
+ jsonData["errcode"] = 403
+ jsonData["message"] = "请登录后再操作"
+ returnJSON, _ := json.Marshal(jsonData)
+
+ ctx.ResponseWriter.Write(returnJSON)
+ }else{
+ ctx.Redirect(302, beego.URLFor("AccountController.Login"))
+ }
}
}
beego.InsertFilter("/manager",beego.BeforeRouter,FilterUser)
diff --git a/routers/router.go b/routers/router.go
index a951dadb..880ec872 100644
--- a/routers/router.go
+++ b/routers/router.go
@@ -19,6 +19,7 @@ func init() {
beego.Router("/manager/users", &controllers.ManagerController{},"*:Users")
beego.Router("/manager/users/edit/:id", &controllers.ManagerController{},"*:EditMember")
beego.Router("/manager/member/create", &controllers.ManagerController{},"post:CreateMember")
+ beego.Router("/manager/member/delete", &controllers.ManagerController{},"post:DeleteMember")
beego.Router("/manager/member/update-member-status",&controllers.ManagerController{},"post:UpdateMemberStatus")
beego.Router("/manager/member/change-member-role", &controllers.ManagerController{},"post:ChangeMemberRole")
beego.Router("/manager/books", &controllers.ManagerController{},"*:Books")
@@ -82,5 +83,7 @@ func init() {
beego.Router("/comment/index", &controllers.CommentController{},"*:Index")
beego.Router("/search",&controllers.SearchController{},"get:Index")
+
+ beego.Router("/tag/:key", &controllers.LabelController{},"get:Index")
}
diff --git a/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.css b/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.css
new file mode 100644
index 00000000..b31f01c7
--- /dev/null
+++ b/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.css
@@ -0,0 +1,55 @@
+.bootstrap-tagsinput {
+ background-color: #fff;
+ border: 1px solid #ccc;
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
+ display: inline-block;
+ padding: 4px 6px;
+ color: #555;
+ vertical-align: middle;
+ border-radius: 4px;
+ max-width: 100%;
+ line-height: 22px;
+ cursor: text;
+}
+.bootstrap-tagsinput input {
+ border: none;
+ box-shadow: none;
+ outline: none;
+ background-color: transparent;
+ padding: 0 6px;
+ margin: 0;
+ width: auto;
+ max-width: inherit;
+}
+.bootstrap-tagsinput.form-control input::-moz-placeholder {
+ color: #777;
+ opacity: 1;
+}
+.bootstrap-tagsinput.form-control input:-ms-input-placeholder {
+ color: #777;
+}
+.bootstrap-tagsinput.form-control input::-webkit-input-placeholder {
+ color: #777;
+}
+.bootstrap-tagsinput input:focus {
+ border: none;
+ box-shadow: none;
+}
+.bootstrap-tagsinput .tag {
+ margin-right: 2px;
+ color: white;
+}
+.bootstrap-tagsinput .tag [data-role="remove"] {
+ margin-left: 8px;
+ cursor: pointer;
+}
+.bootstrap-tagsinput .tag [data-role="remove"]:after {
+ content: "x";
+ padding: 0px 2px;
+}
+.bootstrap-tagsinput .tag [data-role="remove"]:hover {
+ box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
+}
+.bootstrap-tagsinput .tag [data-role="remove"]:hover:active {
+ box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
+}
diff --git a/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.js b/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.js
new file mode 100644
index 00000000..2b403f77
--- /dev/null
+++ b/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.js
@@ -0,0 +1,646 @@
+(function ($) {
+ "use strict";
+
+ var defaultOptions = {
+ tagClass: function(item) {
+ return 'label label-info';
+ },
+ itemValue: function(item) {
+ return item ? item.toString() : item;
+ },
+ itemText: function(item) {
+ return this.itemValue(item);
+ },
+ itemTitle: function(item) {
+ return null;
+ },
+ freeInput: true,
+ addOnBlur: true,
+ maxTags: undefined,
+ maxChars: undefined,
+ confirmKeys: [13, 44],
+ delimiter: ',',
+ delimiterRegex: null,
+ cancelConfirmKeysOnEmpty: true,
+ onTagExists: function(item, $tag) {
+ $tag.hide().fadeIn();
+ },
+ trimValue: false,
+ allowDuplicates: false
+ };
+
+ /**
+ * Constructor function
+ */
+ function TagsInput(element, options) {
+ this.itemsArray = [];
+
+ this.$element = $(element);
+ this.$element.hide();
+
+ this.isSelect = (element.tagName === 'SELECT');
+ this.multiple = (this.isSelect && element.hasAttribute('multiple'));
+ this.objectItems = options && options.itemValue;
+ this.placeholderText = element.hasAttribute('placeholder') ? this.$element.attr('placeholder') : '';
+ this.inputSize = Math.max(1, this.placeholderText.length);
+
+ this.$container = $('');
+ this.$input = $('').appendTo(this.$container);
+
+ this.$element.before(this.$container);
+
+ this.build(options);
+ }
+
+ TagsInput.prototype = {
+ constructor: TagsInput,
+
+ /**
+ * Adds the given item as a new tag. Pass true to dontPushVal to prevent
+ * updating the elements val()
+ */
+ add: function(item, dontPushVal, options) {
+ var self = this;
+
+ if (self.options.maxTags && self.itemsArray.length >= self.options.maxTags)
+ return;
+
+ // Ignore falsey values, except false
+ if (item !== false && !item)
+ return;
+
+ // Trim value
+ if (typeof item === "string" && self.options.trimValue) {
+ item = $.trim(item);
+ }
+
+ // Throw an error when trying to add an object while the itemValue option was not set
+ if (typeof item === "object" && !self.objectItems)
+ throw("Can't add objects when itemValue option is not set");
+
+      // Ignore strings only containing whitespace
+ if (item.toString().match(/^\s*$/))
+ return;
+
+ // If SELECT but not multiple, remove current tag
+ if (self.isSelect && !self.multiple && self.itemsArray.length > 0)
+ self.remove(self.itemsArray[0]);
+
+ if (typeof item === "string" && this.$element[0].tagName === 'INPUT') {
+ var delimiter = (self.options.delimiterRegex) ? self.options.delimiterRegex : self.options.delimiter;
+ var items = item.split(delimiter);
+ if (items.length > 1) {
+ for (var i = 0; i < items.length; i++) {
+ this.add(items[i], true);
+ }
+
+ if (!dontPushVal)
+ self.pushVal();
+ return;
+ }
+ }
+
+ var itemValue = self.options.itemValue(item),
+ itemText = self.options.itemText(item),
+ tagClass = self.options.tagClass(item),
+ itemTitle = self.options.itemTitle(item);
+
+      // Ignore items already added
+ var existing = $.grep(self.itemsArray, function(item) { return self.options.itemValue(item) === itemValue; } )[0];
+ if (existing && !self.options.allowDuplicates) {
+ // Invoke onTagExists
+ if (self.options.onTagExists) {
+ var $existingTag = $(".tag", self.$container).filter(function() { return $(this).data("item") === existing; });
+ self.options.onTagExists(item, $existingTag);
+ }
+ return;
+ }
+
+ // if length greater than limit
+ if (self.items().toString().length + item.length + 1 > self.options.maxInputLength)
+ return;
+
+ // raise beforeItemAdd arg
+ var beforeItemAddEvent = $.Event('beforeItemAdd', { item: item, cancel: false, options: options});
+ self.$element.trigger(beforeItemAddEvent);
+ if (beforeItemAddEvent.cancel)
+ return;
+
+ // register item in internal array and map
+ self.itemsArray.push(item);
+
+ // add a tag element
+
+ var $tag = $('' + htmlEncode(itemText) + '');
+ $tag.data('item', item);
+ self.findInputWrapper().before($tag);
+ $tag.after(' ');
+
+ // add if item represents a value not present in one of the 's options
+ if (self.isSelect && !$('option[value="' + encodeURIComponent(itemValue) + '"]',self.$element)[0]) {
+ var $option = $('');
+ $option.data('item', item);
+ $option.attr('value', itemValue);
+ self.$element.append($option);
+ }
+
+ if (!dontPushVal)
+ self.pushVal();
+
+ // Add class when reached maxTags
+ if (self.options.maxTags === self.itemsArray.length || self.items().toString().length === self.options.maxInputLength)
+ self.$container.addClass('bootstrap-tagsinput-max');
+
+ self.$element.trigger($.Event('itemAdded', { item: item, options: options }));
+ },
+
+ /**
+ * Removes the given item. Pass true to dontPushVal to prevent updating the
+ * elements val()
+ */
+ remove: function(item, dontPushVal, options) {
+ var self = this;
+
+ if (self.objectItems) {
+ if (typeof item === "object")
+ item = $.grep(self.itemsArray, function(other) { return self.options.itemValue(other) == self.options.itemValue(item); } );
+ else
+ item = $.grep(self.itemsArray, function(other) { return self.options.itemValue(other) == item; } );
+
+ item = item[item.length-1];
+ }
+
+ if (item) {
+ var beforeItemRemoveEvent = $.Event('beforeItemRemove', { item: item, cancel: false, options: options });
+ self.$element.trigger(beforeItemRemoveEvent);
+ if (beforeItemRemoveEvent.cancel)
+ return;
+
+ $('.tag', self.$container).filter(function() { return $(this).data('item') === item; }).remove();
+ $('option', self.$element).filter(function() { return $(this).data('item') === item; }).remove();
+ if($.inArray(item, self.itemsArray) !== -1)
+ self.itemsArray.splice($.inArray(item, self.itemsArray), 1);
+ }
+
+ if (!dontPushVal)
+ self.pushVal();
+
+ // Remove class when reached maxTags
+ if (self.options.maxTags > self.itemsArray.length)
+ self.$container.removeClass('bootstrap-tagsinput-max');
+
+ self.$element.trigger($.Event('itemRemoved', { item: item, options: options }));
+ },
+
+ /**
+ * Removes all items
+ */
+ removeAll: function() {
+ var self = this;
+
+ $('.tag', self.$container).remove();
+ $('option', self.$element).remove();
+
+ while(self.itemsArray.length > 0)
+ self.itemsArray.pop();
+
+ self.pushVal();
+ },
+
+ /**
+ * Refreshes the tags so they match the text/value of their corresponding
+ * item.
+ */
+ refresh: function() {
+ var self = this;
+ $('.tag', self.$container).each(function() {
+ var $tag = $(this),
+ item = $tag.data('item'),
+ itemValue = self.options.itemValue(item),
+ itemText = self.options.itemText(item),
+ tagClass = self.options.tagClass(item);
+
+ // Update tag's class and inner text
+ $tag.attr('class', null);
+ $tag.addClass('tag ' + htmlEncode(tagClass));
+ $tag.contents().filter(function() {
+ return this.nodeType == 3;
+ })[0].nodeValue = htmlEncode(itemText);
+
+ if (self.isSelect) {
+ var option = $('option', self.$element).filter(function() { return $(this).data('item') === item; });
+ option.attr('value', itemValue);
+ }
+ });
+ },
+
+ /**
+ * Returns the items added as tags
+ */
+ items: function() {
+ return this.itemsArray;
+ },
+
+ /**
+     * Assemble the value by retrieving the value of each item, and set it on the
+ * element.
+ */
+ pushVal: function() {
+ var self = this,
+ val = $.map(self.items(), function(item) {
+ return self.options.itemValue(item).toString();
+ });
+
+ self.$element.val(val, true).trigger('change');
+ },
+
+ /**
+ * Initializes the tags input behaviour on the element
+ */
+ build: function(options) {
+ var self = this;
+
+ self.options = $.extend({}, defaultOptions, options);
+ // When itemValue is set, freeInput should always be false
+ if (self.objectItems)
+ self.options.freeInput = false;
+
+ makeOptionItemFunction(self.options, 'itemValue');
+ makeOptionItemFunction(self.options, 'itemText');
+ makeOptionFunction(self.options, 'tagClass');
+
+ // Typeahead Bootstrap version 2.3.2
+ if (self.options.typeahead) {
+ var typeahead = self.options.typeahead || {};
+
+ makeOptionFunction(typeahead, 'source');
+
+ self.$input.typeahead($.extend({}, typeahead, {
+ source: function (query, process) {
+ function processItems(items) {
+ var texts = [];
+
+ for (var i = 0; i < items.length; i++) {
+ var text = self.options.itemText(items[i]);
+ map[text] = items[i];
+ texts.push(text);
+ }
+ process(texts);
+ }
+
+ this.map = {};
+ var map = this.map,
+ data = typeahead.source(query);
+
+ if ($.isFunction(data.success)) {
+ // support for Angular callbacks
+ data.success(processItems);
+ } else if ($.isFunction(data.then)) {
+ // support for Angular promises
+ data.then(processItems);
+ } else {
+ // support for functions and jquery promises
+ $.when(data)
+ .then(processItems);
+ }
+ },
+ updater: function (text) {
+ self.add(this.map[text]);
+ return this.map[text];
+ },
+ matcher: function (text) {
+ return (text.toLowerCase().indexOf(this.query.trim().toLowerCase()) !== -1);
+ },
+ sorter: function (texts) {
+ return texts.sort();
+ },
+ highlighter: function (text) {
+ var regex = new RegExp( '(' + this.query + ')', 'gi' );
+ return text.replace( regex, "$1" );
+ }
+ }));
+ }
+
+ // typeahead.js
+ if (self.options.typeaheadjs) {
+ var typeaheadConfig = null;
+ var typeaheadDatasets = {};
+
+ // Determine if main configurations were passed or simply a dataset
+ var typeaheadjs = self.options.typeaheadjs;
+ if ($.isArray(typeaheadjs)) {
+ typeaheadConfig = typeaheadjs[0];
+ typeaheadDatasets = typeaheadjs[1];
+ } else {
+ typeaheadDatasets = typeaheadjs;
+ }
+
+ self.$input.typeahead(typeaheadConfig, typeaheadDatasets).on('typeahead:selected', $.proxy(function (obj, datum) {
+ if (typeaheadDatasets.valueKey)
+ self.add(datum[typeaheadDatasets.valueKey]);
+ else
+ self.add(datum);
+ self.$input.typeahead('val', '');
+ }, self));
+ }
+
+ self.$container.on('click', $.proxy(function(event) {
+ if (! self.$element.attr('disabled')) {
+ self.$input.removeAttr('disabled');
+ }
+ self.$input.focus();
+ }, self));
+
+ if (self.options.addOnBlur && self.options.freeInput) {
+ self.$input.on('focusout', $.proxy(function(event) {
+ // HACK: only process on focusout when no typeahead opened, to
+ // avoid adding the typeahead text as tag
+ if ($('.typeahead, .twitter-typeahead', self.$container).length === 0) {
+ self.add(self.$input.val());
+ self.$input.val('');
+ }
+ }, self));
+ }
+
+
+ self.$container.on('keydown', 'input', $.proxy(function(event) {
+ var $input = $(event.target),
+ $inputWrapper = self.findInputWrapper();
+
+ if (self.$element.attr('disabled')) {
+ self.$input.attr('disabled', 'disabled');
+ return;
+ }
+
+ switch (event.which) {
+ // BACKSPACE
+ case 8:
+ if (doGetCaretPosition($input[0]) === 0) {
+ var prev = $inputWrapper.prev();
+ if (prev.length) {
+ self.remove(prev.data('item'));
+ }
+ }
+ break;
+
+ // DELETE
+ case 46:
+ if (doGetCaretPosition($input[0]) === 0) {
+ var next = $inputWrapper.next();
+ if (next.length) {
+ self.remove(next.data('item'));
+ }
+ }
+ break;
+
+ // LEFT ARROW
+ case 37:
+ // Try to move the input before the previous tag
+ var $prevTag = $inputWrapper.prev();
+ if ($input.val().length === 0 && $prevTag[0]) {
+ $prevTag.before($inputWrapper);
+ $input.focus();
+ }
+ break;
+ // RIGHT ARROW
+ case 39:
+ // Try to move the input after the next tag
+ var $nextTag = $inputWrapper.next();
+ if ($input.val().length === 0 && $nextTag[0]) {
+ $nextTag.after($inputWrapper);
+ $input.focus();
+ }
+ break;
+ default:
+ // ignore
+ }
+
+ // Reset internal input's size
+ var textLength = $input.val().length,
+ wordSpace = Math.ceil(textLength / 5),
+ size = textLength + wordSpace + 1;
+ $input.attr('size', Math.max(this.inputSize, $input.val().length));
+ }, self));
+
+ self.$container.on('keypress', 'input', $.proxy(function(event) {
+ var $input = $(event.target);
+
+ if (self.$element.attr('disabled')) {
+ self.$input.attr('disabled', 'disabled');
+ return;
+ }
+
+ var text = $input.val(),
+ maxLengthReached = self.options.maxChars && text.length >= self.options.maxChars;
+ if (self.options.freeInput && (keyCombinationInList(event, self.options.confirmKeys) || maxLengthReached)) {
+ // Only attempt to add a tag if there is data in the field
+ if (text.length !== 0) {
+ self.add(maxLengthReached ? text.substr(0, self.options.maxChars) : text);
+ $input.val('');
+ }
+
+ // If the field is empty, let the event triggered fire as usual
+ if (self.options.cancelConfirmKeysOnEmpty === false) {
+ event.preventDefault();
+ }
+ }
+
+ // Reset internal input's size
+ var textLength = $input.val().length,
+ wordSpace = Math.ceil(textLength / 5),
+ size = textLength + wordSpace + 1;
+ $input.attr('size', Math.max(this.inputSize, $input.val().length));
+ }, self));
+
+ // Remove icon clicked
+ self.$container.on('click', '[data-role=remove]', $.proxy(function(event) {
+ if (self.$element.attr('disabled')) {
+ return;
+ }
+ self.remove($(event.target).closest('.tag').data('item'));
+ }, self));
+
+ // Only add existing value as tags when using strings as tags
+ if (self.options.itemValue === defaultOptions.itemValue) {
+ if (self.$element[0].tagName === 'INPUT') {
+ self.add(self.$element.val());
+ } else {
+ $('option', self.$element).each(function() {
+ self.add($(this).attr('value'), true);
+ });
+ }
+ }
+ },
+
+ /**
+     * Removes all tagsinput behaviour and unregisters all event handlers
+ */
+ destroy: function() {
+ var self = this;
+
+ // Unbind events
+ self.$container.off('keypress', 'input');
+ self.$container.off('click', '[role=remove]');
+
+ self.$container.remove();
+ self.$element.removeData('tagsinput');
+ self.$element.show();
+ },
+
+ /**
+ * Sets focus on the tagsinput
+ */
+ focus: function() {
+ this.$input.focus();
+ },
+
+ /**
+ * Returns the internal input element
+ */
+ input: function() {
+ return this.$input;
+ },
+
+ /**
+ * Returns the element which is wrapped around the internal input. This
+ * is normally the $container, but typeahead.js moves the $input element.
+ */
+ findInputWrapper: function() {
+ var elt = this.$input[0],
+ container = this.$container[0];
+ while(elt && elt.parentNode !== container)
+ elt = elt.parentNode;
+
+ return $(elt);
+ }
+ };
+
+ /**
+ * Register JQuery plugin
+ */
+ $.fn.tagsinput = function(arg1, arg2, arg3) {
+ var results = [];
+
+ this.each(function() {
+ var tagsinput = $(this).data('tagsinput');
+ // Initialize a new tags input
+ if (!tagsinput) {
+ tagsinput = new TagsInput(this, arg1);
+ $(this).data('tagsinput', tagsinput);
+ results.push(tagsinput);
+
+ if (this.tagName === 'SELECT') {
+ $('option', $(this)).attr('selected', 'selected');
+ }
+
+ // Init tags from $(this).val()
+ $(this).val($(this).val());
+ } else if (!arg1 && !arg2) {
+ // tagsinput already exists
+ // no function, trying to init
+ results.push(tagsinput);
+ } else if(tagsinput[arg1] !== undefined) {
+ // Invoke function on existing tags input
+ if(tagsinput[arg1].length === 3 && arg3 !== undefined){
+ var retVal = tagsinput[arg1](arg2, null, arg3);
+ }else{
+ var retVal = tagsinput[arg1](arg2);
+ }
+ if (retVal !== undefined)
+ results.push(retVal);
+ }
+ });
+
+ if ( typeof arg1 == 'string') {
+ // Return the results from the invoked function calls
+ return results.length > 1 ? results : results[0];
+ } else {
+ return results;
+ }
+ };
+
+ $.fn.tagsinput.Constructor = TagsInput;
+
+ /**
+ * Most options support both a string or number as well as a function as
+ * option value. This function makes sure that the option with the given
+ * key in the given options is wrapped in a function
+ */
+ function makeOptionItemFunction(options, key) {
+ if (typeof options[key] !== 'function') {
+ var propertyName = options[key];
+ options[key] = function(item) { return item[propertyName]; };
+ }
+ }
+ function makeOptionFunction(options, key) {
+ if (typeof options[key] !== 'function') {
+ var value = options[key];
+ options[key] = function() { return value; };
+ }
+ }
+ /**
+ * HtmlEncodes the given value
+ */
+ var htmlEncodeContainer = $('');
+ function htmlEncode(value) {
+ if (value) {
+ return htmlEncodeContainer.text(value).html();
+ } else {
+ return '';
+ }
+ }
+
+ /**
+ * Returns the position of the caret in the given input field
+ * http://flightschool.acylt.com/devnotes/caret-position-woes/
+ */
+ function doGetCaretPosition(oField) {
+ var iCaretPos = 0;
+ if (document.selection) {
+ oField.focus ();
+ var oSel = document.selection.createRange();
+ oSel.moveStart ('character', -oField.value.length);
+ iCaretPos = oSel.text.length;
+ } else if (oField.selectionStart || oField.selectionStart == '0') {
+ iCaretPos = oField.selectionStart;
+ }
+ return (iCaretPos);
+ }
+
+ /**
+ * Returns boolean indicates whether user has pressed an expected key combination.
+ * @param object keyPressEvent: JavaScript event object, refer
+ * http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html
+ * @param object lookupList: expected key combinations, as in:
+ * [13, {which: 188, shiftKey: true}]
+ */
+ function keyCombinationInList(keyPressEvent, lookupList) {
+ var found = false;
+ $.each(lookupList, function (index, keyCombination) {
+ if (typeof (keyCombination) === 'number' && keyPressEvent.which === keyCombination) {
+ found = true;
+ return false;
+ }
+
+ if (keyPressEvent.which === keyCombination.which) {
+ var alt = !keyCombination.hasOwnProperty('altKey') || keyPressEvent.altKey === keyCombination.altKey,
+ shift = !keyCombination.hasOwnProperty('shiftKey') || keyPressEvent.shiftKey === keyCombination.shiftKey,
+ ctrl = !keyCombination.hasOwnProperty('ctrlKey') || keyPressEvent.ctrlKey === keyCombination.ctrlKey;
+ if (alt && shift && ctrl) {
+ found = true;
+ return false;
+ }
+ }
+ });
+
+ return found;
+ }
+
+ /**
+ * Initialize tagsinput behaviour on inputs and selects which have
+ * data-role=tagsinput
+ */
+ $(function() {
+ $("input[data-role=tagsinput], select[multiple][data-role=tagsinput]").tagsinput();
+ });
+})(window.jQuery);
diff --git a/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.less b/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.less
new file mode 100644
index 00000000..face63f1
--- /dev/null
+++ b/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.less
@@ -0,0 +1,50 @@
+.bootstrap-tagsinput {
+ background-color: #fff;
+ border: 1px solid #ccc;
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
+ display: inline-block;
+ padding: 4px 6px;
+ margin-bottom: 10px;
+ color: #555;
+ vertical-align: middle;
+ border-radius: 4px;
+ max-width: 100%;
+ line-height: 22px;
+ cursor: text;
+
+ input {
+ border: none;
+ box-shadow: none;
+ outline: none;
+ background-color: transparent;
+ padding: 0;
+ margin: 0;
+ width: auto !important;
+ max-width: inherit;
+
+ &:focus {
+ border: none;
+ box-shadow: none;
+ }
+ }
+
+ .tag {
+ margin-right: 2px;
+ color: white;
+
+ [data-role="remove"] {
+ margin-left:8px;
+ cursor:pointer;
+ &:after{
+ content: "x";
+ padding:0px 2px;
+ }
+ &:hover {
+ box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
+ &:active {
+ box-shadow: inset 0 3px 5px rgba(0,0,0,0.125);
+ }
+ }
+ }
+ }
+}
diff --git a/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.min.js b/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.min.js
new file mode 100644
index 00000000..3adbfd91
--- /dev/null
+++ b/static/bootstrap/plugins/tagsinput/bootstrap-tagsinput.min.js
@@ -0,0 +1,7 @@
+/*
+ * bootstrap-tagsinput v0.6.1 by Tim Schlechter
+ *
+ */
+
+!function(a){"use strict";function b(b,c){this.itemsArray=[],this.$element=a(b),this.$element.hide(),this.isSelect="SELECT"===b.tagName,this.multiple=this.isSelect&&b.hasAttribute("multiple"),this.objectItems=c&&c.itemValue,this.placeholderText=b.hasAttribute("placeholder")?this.$element.attr("placeholder"):"",this.inputSize=Math.max(1,this.placeholderText.length),this.$container=a(''),this.$input=a('').appendTo(this.$container),this.$element.before(this.$container),this.build(c)}function c(a,b){if("function"!=typeof a[b]){var c=a[b];a[b]=function(a){return a[c]}}}function d(a,b){if("function"!=typeof a[b]){var c=a[b];a[b]=function(){return c}}}function e(a){return a?i.text(a).html():""}function f(a){var b=0;if(document.selection){a.focus();var c=document.selection.createRange();c.moveStart("character",-a.value.length),b=c.text.length}else(a.selectionStart||"0"==a.selectionStart)&&(b=a.selectionStart);return b}function g(b,c){var d=!1;return a.each(c,function(a,c){if("number"==typeof c&&b.which===c)return d=!0,!1;if(b.which===c.which){var e=!c.hasOwnProperty("altKey")||b.altKey===c.altKey,f=!c.hasOwnProperty("shiftKey")||b.shiftKey===c.shiftKey,g=!c.hasOwnProperty("ctrlKey")||b.ctrlKey===c.ctrlKey;if(e&&f&&g)return d=!0,!1}}),d}var h={tagClass:function(a){return"label label-info"},itemValue:function(a){return a?a.toString():a},itemText:function(a){return this.itemValue(a)},itemTitle:function(a){return null},freeInput:!0,addOnBlur:!0,maxTags:void 0,maxChars:void 0,confirmKeys:[13,44],delimiter:",",delimiterRegex:null,cancelConfirmKeysOnEmpty:!0,onTagExists:function(a,b){b.hide().fadeIn()},trimValue:!1,allowDuplicates:!1};b.prototype={constructor:b,add:function(b,c,d){var f=this;if(!(f.options.maxTags&&f.itemsArray.length>=f.options.maxTags)&&(b===!1||b)){if("string"==typeof b&&f.options.trimValue&&(b=a.trim(b)),"object"==typeof b&&!f.objectItems)throw"Can't add objects when itemValue option is not 
set";if(!b.toString().match(/^\s*$/)){if(f.isSelect&&!f.multiple&&f.itemsArray.length>0&&f.remove(f.itemsArray[0]),"string"==typeof b&&"INPUT"===this.$element[0].tagName){var g=f.options.delimiterRegex?f.options.delimiterRegex:f.options.delimiter,h=b.split(g);if(h.length>1){for(var i=0;i
tag
+ if size := p.htmlHr(out, data, doRender); size > 0 {
+ return size
+ }
+
+ // check for HTML CDATA
+ if size := p.htmlCDATA(out, data, doRender); size > 0 {
+ return size
+ }
+
+ // no special case recognized
+ return 0
+ }
+
+ // look for an unindented matching closing tag
+ // followed by a blank line
+ found := false
+ /*
+ closetag := []byte("\n" + curtag + ">")
+ j = len(curtag) + 1
+ for !found {
+ // scan for a closing tag at the beginning of a line
+ if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+ j += skip + len(closetag)
+ } else {
+ break
+ }
+
+ // see if it is the only thing on the line
+ if skip := p.isEmpty(data[j:]); skip > 0 {
+ // see if it is followed by a blank line/eof
+ j += skip
+ if j >= len(data) {
+ found = true
+ i = j
+ } else {
+ if skip := p.isEmpty(data[j:]); skip > 0 {
+ j += skip
+ found = true
+ i = j
+ }
+ }
+ }
+ }
+ */
+
+ // if not found, try a second pass looking for indented match
+ // but not if tag is "ins" or "del" (following original Markdown.pl)
+ if !found && curtag != "ins" && curtag != "del" {
+ i = 1
+ for i < len(data) {
+ i++
+ for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+ i++
+ }
+
+ if i+2+len(curtag) >= len(data) {
+ break
+ }
+
+ j = p.htmlFindEnd(curtag, data[i-1:])
+
+ if j > 0 {
+ i += j - 1
+ found = true
+ break
+ }
+ }
+ }
+
+ if !found {
+ return 0
+ }
+
+ // the end of the block has been found
+ if doRender {
+ // trim newlines
+ end := i
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ p.r.BlockHtml(out, data[:end])
+ }
+
+ return i
+}
+
+// renderHTMLBlock finishes an HTML block whose content ends at start: the
+// block is accepted only if a blank line follows. On success it returns the
+// total bytes consumed (content plus blank line) and, when doRender is set,
+// emits data up to the blank line (minus trailing newlines) via BlockHtml.
+// Returns 0 when no blank line follows.
+func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int {
+ // html block needs to end with a blank line
+ if i := p.isEmpty(data[start:]); i > 0 {
+ size := start + i
+ if doRender {
+ // trim trailing newlines
+ end := size
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ p.r.BlockHtml(out, data[:end])
+ }
+ return size
+ }
+ return 0
+}
+
+// HTML comment, lax form
+// htmlComment matches an HTML comment at the start of data (reusing the
+// inline-comment scanner) and accepts it as a block when a blank line
+// follows. Returns bytes consumed, or 0 on no match.
+func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
+ i := p.inlineHTMLComment(out, data)
+ return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HTML CDATA section
+// htmlCDATA matches a "<![CDATA[ ... ]]>" section at the start of data and
+// accepts it as a block when followed by a blank line (via renderHTMLBlock).
+// Returns bytes consumed, or 0 on no match / unterminated section.
+// NOTE(review): this hunk was garbled in transit (the tag literal and loop
+// condition were cut); reconstructed from the surviving comment and the
+// surrounding helpers — confirm against the canonical source.
+func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int {
+ const cdataTag = "<![CDATA["
+ if len(data) < len(cdataTag)+3 || !bytes.HasPrefix(data, []byte(cdataTag)) {
+ return 0
+ }
+ i := len(cdataTag)
+ // scan for the closing "]]>" marker, across lines if necessary
+ for i+2 < len(data) && !(data[i] == ']' && data[i+1] == ']' && data[i+2] == '>') {
+ i++
+ }
+ i += 3
+ // no end-of-comment marker
+ if i >= len(data) {
+ return 0
+ }
+ return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HR, which is the only self-closing block tag considered
+// htmlHr matches an <hr ...> tag at the start of data and accepts it as a
+// block when followed by a blank line. Returns bytes consumed, or 0.
+// NOTE(review): assumes data has at least 4 bytes — callers appear to
+// guarantee this; confirm at the call site.
+func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
+ if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+ return 0
+ }
+ if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+ // not an <hr> tag after all; at least not a valid one
+ return 0
+ }
+
+ // scan to the end of the tag (or give up at end of line)
+ i := 3
+ for data[i] != '>' && data[i] != '\n' {
+ i++
+ }
+
+ if data[i] == '>' {
+ return p.renderHTMLBlock(out, data, i+1, doRender)
+ }
+
+ return 0
+}
+
+// htmlFindTag extracts the leading alphanumeric run from data and reports
+// whether it names a recognized HTML block-level tag (per the blockTags
+// table). Returns the tag name and true on a match, "" and false otherwise.
+func (p *parser) htmlFindTag(data []byte) (string, bool) {
+ i := 0
+ for isalnum(data[i]) {
+ i++
+ }
+ key := string(data[:i])
+ if _, ok := blockTags[key]; ok {
+ return key, true
+ }
+ return "", false
+}
+
+// htmlFindEnd checks whether data begins with the closing tag "</tag>"
+// followed by a blank line (or, in lax mode, just end of line). It returns
+// the number of bytes consumed through the terminating blank content, or 0
+// when the closing tag is absent or not properly terminated.
+// Fix: the closing-tag literal had lost its "</" prefix, so HasPrefix could
+// never match (data[0..1] is "</" per the precondition below) and the
+// function unconditionally returned 0.
+func (p *parser) htmlFindEnd(tag string, data []byte) int {
+ // assume data[0] == '<' && data[1] == '/' already tested
+
+ // check if tag is a match
+ closetag := []byte("</" + tag + ">")
+ if !bytes.HasPrefix(data, closetag) {
+ return 0
+ }
+ i := len(closetag)
+
+ // check that the rest of the line is blank
+ skip := 0
+ if skip = p.isEmpty(data[i:]); skip == 0 {
+ return 0
+ }
+ i += skip
+ skip = 0
+
+ if i >= len(data) {
+ return i
+ }
+
+ // in lax mode a terminated line is enough; otherwise require a blank line
+ if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+ return i
+ }
+ if skip = p.isEmpty(data[i:]); skip == 0 {
+ // following line must be blank
+ return 0
+ }
+
+ return i + skip
+}
+
+// isEmpty reports whether data begins with a blank line (only spaces and
+// tabs before the first newline). It returns the length of that line
+// including the newline — or one past the end when the buffer has no
+// newline — and 0 when the first line carries other content.
+func (*parser) isEmpty(data []byte) int {
+ // it is okay to call isEmpty on an empty buffer
+ if len(data) == 0 {
+ return 0
+ }
+
+ var i int
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
+ if data[i] != ' ' && data[i] != '\t' {
+ return 0
+ }
+ }
+ return i + 1
+}
+
+// isHRule reports whether data starts with a horizontal-rule line: up to
+// three leading spaces, then at least three '*', '-' or '_' characters (all
+// the same), with nothing but spaces otherwise before the newline.
+// NOTE(review): assumes the line is newline-terminated; confirm callers
+// always pass terminated lines.
+func (*parser) isHRule(data []byte) bool {
+ i := 0
+
+ // skip up to three spaces
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // look at the hrule char
+ if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+ return false
+ }
+ c := data[i]
+
+ // the whole line must be the char or whitespace
+ n := 0
+ for data[i] != '\n' {
+ switch {
+ case data[i] == c:
+ n++
+ case data[i] != ' ':
+ return false
+ }
+ i++
+ }
+
+ // require at least three occurrences of the rule character
+ return n >= 3
+}
+
+// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
+// and returns the end index if so, or 0 otherwise. It also returns the marker found.
+// If syntax is not nil, it gets set to the syntax specified in the fence line.
+// A final newline is mandatory to recognize the fence line, unless newlineOptional is true.
+func isFenceLine(data []byte, syntax *string, oldmarker string, newlineOptional bool) (end int, marker string) {
+ i, size := 0, 0
+
+ // skip up to three spaces
+ for i < len(data) && i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // check for the marker characters: ~ or `
+ if i >= len(data) {
+ return 0, ""
+ }
+ if data[i] != '~' && data[i] != '`' {
+ return 0, ""
+ }
+
+ c := data[i]
+
+ // the whole line must be the same char or whitespace
+ for i < len(data) && data[i] == c {
+ size++
+ i++
+ }
+
+ // the marker char must occur at least 3 times
+ if size < 3 {
+ return 0, ""
+ }
+ marker = string(data[i-size : i])
+
+ // if this is the end marker, it must match the beginning marker
+ if oldmarker != "" && marker != oldmarker {
+ return 0, ""
+ }
+
+ // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
+ // into one, always get the syntax, and discard it if the caller doesn't care.
+ if syntax != nil {
+ syn := 0
+ i = skipChar(data, i, ' ')
+
+ if i >= len(data) {
+ if newlineOptional && i == len(data) {
+ return i, marker
+ }
+ return 0, ""
+ }
+
+ syntaxStart := i
+
+ // a {}-wrapped syntax spec: take everything up to the closing brace
+ if data[i] == '{' {
+ i++
+ syntaxStart++
+
+ for i < len(data) && data[i] != '}' && data[i] != '\n' {
+ syn++
+ i++
+ }
+
+ if i >= len(data) || data[i] != '}' {
+ return 0, ""
+ }
+
+ // strip all whitespace at the beginning and the end
+ // of the {} block
+ for syn > 0 && isspace(data[syntaxStart]) {
+ syntaxStart++
+ syn--
+ }
+
+ for syn > 0 && isspace(data[syntaxStart+syn-1]) {
+ syn--
+ }
+
+ i++
+ } else {
+ // bare syntax word: take the run up to the next whitespace
+ for i < len(data) && !isspace(data[i]) {
+ syn++
+ i++
+ }
+ }
+
+ *syntax = string(data[syntaxStart : syntaxStart+syn])
+ }
+
+ // only trailing spaces may follow before the mandatory (or optional) newline
+ i = skipChar(data, i, ' ')
+ if i >= len(data) || data[i] != '\n' {
+ if newlineOptional && i == len(data) {
+ return i, marker
+ }
+ return 0, ""
+ }
+
+ return i + 1, marker // Take newline into account.
+}
+
+// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
+// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
+// If doRender is true, a final newline is mandatory to recognize the fenced code block.
+func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int {
+ var syntax string
+ beg, marker := isFenceLine(data, &syntax, "", false)
+ if beg == 0 || beg >= len(data) {
+ return 0
+ }
+
+ // accumulates the verbatim body between the fences
+ var work bytes.Buffer
+
+ for {
+ // safe to assume beg < len(data)
+
+ // check for the end of the code block
+ newlineOptional := !doRender
+ fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional)
+ if fenceEnd != 0 {
+ beg += fenceEnd
+ break
+ }
+
+ // copy the current line
+ end := skipUntilChar(data, beg, '\n') + 1
+
+ // did we reach the end of the buffer without a closing marker?
+ if end >= len(data) {
+ return 0
+ }
+
+ // verbatim copy to the working buffer
+ if doRender {
+ work.Write(data[beg:end])
+ }
+ beg = end
+ }
+
+ if doRender {
+ p.r.BlockCode(out, work.Bytes(), syntax)
+ }
+
+ return beg
+}
+
+// table parses a table (header row, alignment underline, then body rows)
+// at the start of data and renders it. Returns bytes consumed, or 0 when
+// data does not begin with a valid table header.
+func (p *parser) table(out *bytes.Buffer, data []byte) int {
+ var header bytes.Buffer
+ i, columns := p.tableHeader(&header, data)
+ if i == 0 {
+ return 0
+ }
+
+ var body bytes.Buffer
+
+ for i < len(data) {
+ // a body row must contain at least one pipe; count them on this line
+ pipes, rowStart := 0, i
+ for ; data[i] != '\n'; i++ {
+ if data[i] == '|' {
+ pipes++
+ }
+ }
+
+ // a pipe-less line ends the table; rewind so it is parsed as what follows
+ if pipes == 0 {
+ i = rowStart
+ break
+ }
+
+ // include the newline in data sent to tableRow
+ i++
+ p.tableRow(&body, data[rowStart:i], columns, false)
+ }
+
+ p.r.Table(out, header.Bytes(), body.Bytes(), columns)
+
+ return i
+}
+
+// check if the specified position is preceded by an odd number of backslashes
+// (an even count means the backslashes escape each other, not data[i]).
+func isBackslashEscaped(data []byte, i int) bool {
+ backslashes := 0
+ for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
+ backslashes++
+ }
+ return backslashes&1 == 1
+}
+
+// tableHeader parses the table header line plus the underline row that
+// defines per-column alignment. It returns the bytes consumed through the
+// underline (0 on failure) and a slice of TABLE_ALIGNMENT_* flags, one per
+// column. Unescaped pipes delimit columns; leading/trailing pipes are
+// allowed but do not count as extra columns.
+func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) {
+ i := 0
+ colCount := 1
+ for i = 0; data[i] != '\n'; i++ {
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ colCount++
+ }
+ }
+
+ // doesn't look like a table header
+ if colCount == 1 {
+ return
+ }
+
+ // include the newline in the data sent to tableRow
+ header := data[:i+1]
+
+ // column count ignores pipes at beginning or end of line
+ if data[0] == '|' {
+ colCount--
+ }
+ if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
+ colCount--
+ }
+
+ columns = make([]int, colCount)
+
+ // move on to the header underline
+ i++
+ if i >= len(data) {
+ return
+ }
+
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ i++
+ }
+ i = skipChar(data, i, ' ')
+
+ // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
+ // and trailing | optional on last column
+ col := 0
+ for data[i] != '\n' {
+ dashes := 0
+
+ // a leading colon marks left alignment (both colons -> centered)
+ if data[i] == ':' {
+ i++
+ columns[col] |= TABLE_ALIGNMENT_LEFT
+ dashes++
+ }
+ for data[i] == '-' {
+ i++
+ dashes++
+ }
+ // a trailing colon marks right alignment
+ if data[i] == ':' {
+ i++
+ columns[col] |= TABLE_ALIGNMENT_RIGHT
+ dashes++
+ }
+ for data[i] == ' ' {
+ i++
+ }
+
+ // end of column test is messy
+ switch {
+ case dashes < 3:
+ // not a valid column
+ return
+
+ case data[i] == '|' && !isBackslashEscaped(data, i):
+ // marker found, now skip past trailing whitespace
+ col++
+ i++
+ for data[i] == ' ' {
+ i++
+ }
+
+ // trailing junk found after last column
+ if col >= colCount && data[i] != '\n' {
+ return
+ }
+
+ case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
+ // something else found where marker was required
+ return
+
+ case data[i] == '\n':
+ // marker is optional for the last column
+ col++
+
+ default:
+ // trailing junk found after last column
+ return
+ }
+ }
+ if col != colCount {
+ return
+ }
+
+ // underline validated: render the header row and report total size
+ p.tableRow(out, header, columns, true)
+ size = i + 1
+ return
+}
+
+// tableRow splits one newline-terminated row into cells on unescaped pipes,
+// runs inline processing on each cell, renders each as a header or body
+// cell per the header flag, pads short rows with empty cells to the column
+// count, and emits the assembled row.
+func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) {
+ i, col := 0, 0
+ var rowWork bytes.Buffer
+
+ // skip an optional leading pipe
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ i++
+ }
+
+ for col = 0; col < len(columns) && i < len(data); col++ {
+ for data[i] == ' ' {
+ i++
+ }
+
+ cellStart := i
+
+ for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
+ i++
+ }
+
+ cellEnd := i
+
+ // skip the end-of-cell marker, possibly taking us past end of buffer
+ i++
+
+ // trim trailing spaces from the cell
+ for cellEnd > cellStart && data[cellEnd-1] == ' ' {
+ cellEnd--
+ }
+
+ var cellWork bytes.Buffer
+ p.inline(&cellWork, data[cellStart:cellEnd])
+
+ if header {
+ p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col])
+ } else {
+ p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col])
+ }
+ }
+
+ // pad it out with empty columns to get the right number
+ for ; col < len(columns); col++ {
+ if header {
+ p.r.TableHeaderCell(&rowWork, nil, columns[col])
+ } else {
+ p.r.TableCell(&rowWork, nil, columns[col])
+ }
+ }
+
+ // silently ignore rows with too many cells
+
+ p.r.TableRow(out, rowWork.Bytes())
+}
+
+// returns blockquote prefix length: up to three leading spaces, a '>', and
+// one optional following space. Returns 0 when data has no such prefix.
+func (p *parser) quotePrefix(data []byte) int {
+ i := 0
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+ if data[i] == '>' {
+ // consume one space after the '>' if present
+ if data[i+1] == ' ' {
+ return i + 2
+ }
+ return i + 1
+ }
+ return 0
+}
+
+// blockquote ends with at least one blank line
+// followed by something without a blockquote prefix
+// (beg indexes the current blank candidate, end the start of the next line).
+func (p *parser) terminateBlockquote(data []byte, beg, end int) bool {
+ if p.isEmpty(data[beg:]) <= 0 {
+ return false
+ }
+ // blank line at end of input terminates unconditionally
+ if end >= len(data) {
+ return true
+ }
+ return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
+}
+
+// parse a blockquote fragment: strip the '>' prefix from each line, gather
+// the raw content, recursively parse it as blocks, and emit a BlockQuote.
+// Returns the number of bytes consumed.
+func (p *parser) quote(out *bytes.Buffer, data []byte) int {
+ var raw bytes.Buffer
+ beg, end := 0, 0
+ for beg < len(data) {
+ end = beg
+ // Step over whole lines, collecting them. While doing that, check for
+ // fenced code and if one's found, incorporate it altogether,
+ // regardless of any contents inside it
+ for data[end] != '\n' {
+ if p.flags&EXTENSION_FENCED_CODE != 0 {
+ if i := p.fencedCodeBlock(out, data[end:], false); i > 0 {
+ // -1 to compensate for the extra end++ after the loop:
+ end += i - 1
+ break
+ }
+ }
+ end++
+ }
+ end++
+
+ if pre := p.quotePrefix(data[beg:]); pre > 0 {
+ // skip the prefix
+ beg += pre
+ } else if p.terminateBlockquote(data, beg, end) {
+ break
+ }
+
+ // this line is part of the blockquote
+ raw.Write(data[beg:end])
+ beg = end
+ }
+
+ // recursively parse the stripped content as a block sequence
+ var cooked bytes.Buffer
+ p.block(&cooked, raw.Bytes())
+ p.r.BlockQuote(out, cooked.Bytes())
+ return end
+}
+
+// returns prefix length for block code: 4 when the line starts with four
+// spaces, 0 otherwise.
+// NOTE(review): indexes data[0..3] unconditionally — callers appear to pass
+// whole newline-terminated lines; confirm minimum length upstream.
+func (p *parser) codePrefix(data []byte) int {
+ if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
+ return 4
+ }
+ return 0
+}
+
+// code parses an indented (4-space) code block at the start of data,
+// de-indents each line, normalizes trailing blank lines to a single
+// newline, and emits a BlockCode with no syntax hint. Returns bytes
+// consumed (up to the first non-blank, non-indented line).
+func (p *parser) code(out *bytes.Buffer, data []byte) int {
+ var work bytes.Buffer
+
+ i := 0
+ for i < len(data) {
+ beg := i
+ for data[i] != '\n' {
+ i++
+ }
+ i++
+
+ blankline := p.isEmpty(data[beg:i]) > 0
+ if pre := p.codePrefix(data[beg:i]); pre > 0 {
+ beg += pre
+ } else if !blankline {
+ // non-empty, non-prefixed line breaks the pre
+ i = beg
+ break
+ }
+
+ // verbatim copy to the working buffer
+ if blankline {
+ work.WriteByte('\n')
+ } else {
+ work.Write(data[beg:i])
+ }
+ }
+
+ // trim all the \n off the end of work
+ workbytes := work.Bytes()
+ eol := len(workbytes)
+ for eol > 0 && workbytes[eol-1] == '\n' {
+ eol--
+ }
+ if eol != len(workbytes) {
+ work.Truncate(eol)
+ }
+
+ // always end the block with exactly one newline
+ work.WriteByte('\n')
+
+ p.r.BlockCode(out, work.Bytes(), "")
+
+ return i
+}
+
+// returns unordered list item prefix
+// (length through the marker and its following space; 0 on no match).
+func (p *parser) uliPrefix(data []byte) int {
+ i := 0
+
+ // start with up to 3 spaces
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // need a *, +, or - followed by a space
+ if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
+ data[i+1] != ' ' {
+ return 0
+ }
+ return i + 2
+}
+
+// returns ordered list item prefix
+// (length through the digits, dot, and following space; 0 on no match).
+func (p *parser) oliPrefix(data []byte) int {
+ i := 0
+
+ // start with up to 3 spaces
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // count the digits
+ start := i
+ for data[i] >= '0' && data[i] <= '9' {
+ i++
+ }
+
+ // we need >= 1 digits followed by a dot and a space
+ if start == i || data[i] != '.' || data[i+1] != ' ' {
+ return 0
+ }
+ return i + 2
+}
+
+// returns definition list item prefix (": " at the start of data; 0 on no
+// match).
+// NOTE(review): the space-skipping loop below can never advance — data[i]
+// is ':' at that point, not ' ' — so this always returns 2 on a match;
+// retained as-is pending comparison with upstream.
+func (p *parser) dliPrefix(data []byte) int {
+ i := 0
+
+ // need a : followed by a spaces
+ if data[i] != ':' || data[i+1] != ' ' {
+ return 0
+ }
+ for data[i] == ' ' {
+ i++
+ }
+ return i + 2
+}
+
+// parse ordered or unordered list block: consume consecutive list items
+// until one signals end-of-list, wrapping the rendered items in a List
+// callback so the renderer controls the enclosing markup. Returns bytes
+// consumed.
+func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
+ i := 0
+ flags |= LIST_ITEM_BEGINNING_OF_LIST
+ work := func() bool {
+ for i < len(data) {
+ skip := p.listItem(out, data[i:], &flags)
+ i += skip
+
+ // stop when the item consumed nothing or marked the list finished
+ if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
+ break
+ }
+ // only the first item carries the beginning-of-list flag
+ flags &= ^LIST_ITEM_BEGINNING_OF_LIST
+ }
+ return true
+ }
+
+ p.r.List(out, work, flags)
+ return i
+}
+
+// Parse a single list item.
+// Assumes initial prefix is already removed if this is a sublist.
+// Gathers the item's lines (including nested lists and blocks), updates
+// *flags (end-of-list, contains-block, definition-term), renders the item
+// content as inline or block text accordingly, and emits a ListItem.
+// Returns the number of bytes consumed.
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
+ // keep track of the indentation of the first line
+ itemIndent := 0
+ for itemIndent < 3 && data[itemIndent] == ' ' {
+ itemIndent++
+ }
+
+ // try each list-marker style in turn: unordered, ordered, definition
+ i := p.uliPrefix(data)
+ if i == 0 {
+ i = p.oliPrefix(data)
+ }
+ if i == 0 {
+ i = p.dliPrefix(data)
+ // reset definition term flag
+ if i > 0 {
+ *flags &= ^LIST_TYPE_TERM
+ }
+ }
+ if i == 0 {
+ // if in definition list, set term flag and continue
+ if *flags&LIST_TYPE_DEFINITION != 0 {
+ *flags |= LIST_TYPE_TERM
+ } else {
+ return 0
+ }
+ }
+
+ // skip leading whitespace on first line
+ for data[i] == ' ' {
+ i++
+ }
+
+ // find the end of the line
+ line := i
+ for i > 0 && data[i-1] != '\n' {
+ i++
+ }
+
+ // get working buffer
+ var raw bytes.Buffer
+
+ // put the first line into the working buffer
+ raw.Write(data[line:i])
+ line = i
+
+ // process the following lines
+ containsBlankLine := false
+ sublist := 0
+
+gatherlines:
+ for line < len(data) {
+ i++
+
+ // find the end of this line
+ for data[i-1] != '\n' {
+ i++
+ }
+
+ // if it is an empty line, guess that it is part of this item
+ // and move on to the next line
+ if p.isEmpty(data[line:i]) > 0 {
+ containsBlankLine = true
+ raw.Write(data[line:i])
+ line = i
+ continue
+ }
+
+ // calculate the indentation
+ indent := 0
+ for indent < 4 && line+indent < i && data[line+indent] == ' ' {
+ indent++
+ }
+
+ chunk := data[line+indent : i]
+
+ // evaluate how this line fits in
+ switch {
+ // is this a nested list item?
+ case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
+ p.oliPrefix(chunk) > 0 ||
+ p.dliPrefix(chunk) > 0:
+
+ if containsBlankLine {
+ // end the list if the type changed after a blank line
+ if indent <= itemIndent &&
+ ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) ||
+ (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) {
+
+ *flags |= LIST_ITEM_END_OF_LIST
+ break gatherlines
+ }
+ *flags |= LIST_ITEM_CONTAINS_BLOCK
+ }
+
+ // to be a nested list, it must be indented more
+ // if not, it is the next item in the same list
+ if indent <= itemIndent {
+ break gatherlines
+ }
+
+ // is this the first item in the nested list?
+ if sublist == 0 {
+ sublist = raw.Len()
+ }
+
+ // is this a nested prefix header?
+ case p.isPrefixHeader(chunk):
+ // if the header is not indented, it is not nested in the list
+ // and thus ends the list
+ if containsBlankLine && indent < 4 {
+ *flags |= LIST_ITEM_END_OF_LIST
+ break gatherlines
+ }
+ *flags |= LIST_ITEM_CONTAINS_BLOCK
+
+ // anything following an empty line is only part
+ // of this item if it is indented 4 spaces
+ // (regardless of the indentation of the beginning of the item)
+ case containsBlankLine && indent < 4:
+ if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 {
+ // is the next item still a part of this list?
+ next := i
+ for data[next] != '\n' {
+ next++
+ }
+ for next < len(data)-1 && data[next] == '\n' {
+ next++
+ }
+ if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
+ *flags |= LIST_ITEM_END_OF_LIST
+ }
+ } else {
+ *flags |= LIST_ITEM_END_OF_LIST
+ }
+ break gatherlines
+
+ // a blank line means this should be parsed as a block
+ case containsBlankLine:
+ *flags |= LIST_ITEM_CONTAINS_BLOCK
+ }
+
+ containsBlankLine = false
+
+ // add the line into the working buffer without prefix
+ raw.Write(data[line+indent : i])
+
+ line = i
+ }
+
+ // If reached end of data, the Renderer.ListItem call we're going to make below
+ // is definitely the last in the list.
+ if line >= len(data) {
+ *flags |= LIST_ITEM_END_OF_LIST
+ }
+
+ rawBytes := raw.Bytes()
+
+ // render the contents of the list item
+ var cooked bytes.Buffer
+ if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
+ // intermediate render of block item, except for definition term
+ if sublist > 0 {
+ p.block(&cooked, rawBytes[:sublist])
+ p.block(&cooked, rawBytes[sublist:])
+ } else {
+ p.block(&cooked, rawBytes)
+ }
+ } else {
+ // intermediate render of inline item
+ if sublist > 0 {
+ p.inline(&cooked, rawBytes[:sublist])
+ p.block(&cooked, rawBytes[sublist:])
+ } else {
+ p.inline(&cooked, rawBytes)
+ }
+ }
+
+ // render the actual list item
+ cookedBytes := cooked.Bytes()
+ parsedEnd := len(cookedBytes)
+
+ // strip trailing newlines
+ for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' {
+ parsedEnd--
+ }
+ p.r.ListItem(out, cookedBytes[:parsedEnd], *flags)
+
+ return line
+}
+
+// render a single paragraph that has already been parsed out:
+// strips leading/trailing spaces and the trailing newline, then hands the
+// inline-processed content to the renderer's Paragraph callback.
+func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) {
+ if len(data) == 0 {
+ return
+ }
+
+ // trim leading spaces
+ beg := 0
+ for data[beg] == ' ' {
+ beg++
+ }
+
+ // trim trailing newline
+ end := len(data) - 1
+
+ // trim trailing spaces
+ for end > beg && data[end-1] == ' ' {
+ end--
+ }
+
+ // closure defers inline processing until the renderer asks for it
+ work := func() bool {
+ p.inline(out, data[beg:end])
+ return true
+ }
+ p.r.Paragraph(out, work)
+}
+
+// paragraph consumes a paragraph from the start of data, scanning line by
+// line for whatever terminates it: a blank line, an underlined (setext)
+// header, an HTML block (lax mode), a prefix header, a horizontal rule, a
+// fenced code block, a definition-list item, or (with the relevant
+// extension) a list/quote/code prefix. Returns bytes consumed.
+func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
+ // prev: index of 1st char of previous line
+ // line: index of 1st char of current line
+ // i: index of cursor/end of current line
+ var prev, line, i int
+
+ // keep going until we find something to mark the end of the paragraph
+ for i < len(data) {
+ // mark the beginning of the current line
+ prev = line
+ current := data[i:]
+ line = i
+
+ // did we find a blank line marking the end of the paragraph?
+ if n := p.isEmpty(current); n > 0 {
+ // is this blank line followed by a definition list item?
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+ if i < len(data)-1 && data[i+1] == ':' {
+ return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+ }
+ }
+
+ p.renderParagraph(out, data[:i])
+ return i + n
+ }
+
+ // an underline under some text marks a header, so our paragraph ended on prev line
+ if i > 0 {
+ if level := p.isUnderlinedHeader(current); level > 0 {
+ // render the paragraph
+ p.renderParagraph(out, data[:prev])
+
+ // ignore leading and trailing whitespace
+ eol := i - 1
+ for prev < eol && data[prev] == ' ' {
+ prev++
+ }
+ for eol > prev && data[eol-1] == ' ' {
+ eol--
+ }
+
+ // render the header
+ // this ugly double closure avoids forcing variables onto the heap
+ work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool {
+ return func() bool {
+ pp.inline(o, d)
+ return true
+ }
+ }(out, p, data[prev:eol])
+
+ id := ""
+ if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+ id = SanitizedAnchorName(string(data[prev:eol]))
+ }
+
+ p.r.Header(out, work, level, id)
+
+ // find the end of the underline
+ for data[i] != '\n' {
+ i++
+ }
+ return i
+ }
+ }
+
+ // if the next line starts a block of HTML, then the paragraph ends here
+ if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+ if data[i] == '<' && p.html(out, current, false) > 0 {
+ // rewind to before the HTML block
+ p.renderParagraph(out, data[:i])
+ return i
+ }
+ }
+
+ // if there's a prefixed header or a horizontal rule after this, paragraph is over
+ if p.isPrefixHeader(current) || p.isHRule(current) {
+ p.renderParagraph(out, data[:i])
+ return i
+ }
+
+ // if there's a fenced code block, paragraph is over
+ if p.flags&EXTENSION_FENCED_CODE != 0 {
+ if p.fencedCodeBlock(out, current, false) > 0 {
+ p.renderParagraph(out, data[:i])
+ return i
+ }
+ }
+
+ // if there's a definition list item, prev line is a definition term
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+ if p.dliPrefix(current) != 0 {
+ return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+ }
+ }
+
+ // if there's a list after this, paragraph is over
+ if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 {
+ if p.uliPrefix(current) != 0 ||
+ p.oliPrefix(current) != 0 ||
+ p.quotePrefix(current) != 0 ||
+ p.codePrefix(current) != 0 {
+ p.renderParagraph(out, data[:i])
+ return i
+ }
+ }
+
+ // otherwise, scan to the beginning of the next line
+ for data[i] != '\n' {
+ i++
+ }
+ i++
+ }
+
+ // ran out of data: everything gathered so far is the paragraph
+ p.renderParagraph(out, data[:i])
+ return i
+}
+
+// SanitizedAnchorName returns a sanitized anchor name for the given text.
+//
+// It implements the algorithm specified in the package comment.
+func SanitizedAnchorName(text string) string {
+ var anchorName []rune
+ // futureDash records that non-alphanumeric input was seen; the dash is
+ // emitted lazily so runs collapse to one and no leading dash appears
+ futureDash := false
+ for _, r := range text {
+ switch {
+ case unicode.IsLetter(r) || unicode.IsNumber(r):
+ if futureDash && len(anchorName) > 0 {
+ anchorName = append(anchorName, '-')
+ }
+ futureDash = false
+ anchorName = append(anchorName, unicode.ToLower(r))
+ default:
+ futureDash = true
+ }
+ }
+ return string(anchorName)
+}
diff --git a/vendor/github.com/russross/blackfriday/block_test.go b/vendor/github.com/russross/blackfriday/block_test.go
new file mode 100644
index 00000000..89d57754
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/block_test.go
@@ -0,0 +1,1781 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross Header 1
\n",
+
+ "## Header 2\n",
+ "Header 2
\n",
+
+ "### Header 3\n",
+ "Header 3
\n",
+
+ "#### Header 4\n",
+ "Header 4
\n",
+
+ "##### Header 5\n",
+ "Header 5
\n",
+
+ "###### Header 6\n",
+ "Header 6
\n",
+
+ "####### Header 7\n",
+ "# Header 7
\n",
+
+ "#Header 1\n",
+ "Header 1
\n",
+
+ "##Header 2\n",
+ "Header 2
\n",
+
+ "###Header 3\n",
+ "Header 3
\n",
+
+ "####Header 4\n",
+ "Header 4
\n",
+
+ "#####Header 5\n",
+ "Header 5
\n",
+
+ "######Header 6\n",
+ "Header 6
\n",
+
+ "#######Header 7\n",
+ "#Header 7
\n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Header 1
\n\n\n
\n",
+
+ "* List\n#Header\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "Header
\n
\n",
+
+ "#Header 1 \\#\n",
+ "\n
Nested header
Header 1 #
\n",
+
+ "#Header 1 \\# foo\n",
+ "Header 1 # foo
\n",
+
+ "#Header 1 #\\##\n",
+ "Header 1 ##
\n",
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestPrefixHeaderSpaceExtension(t *testing.T) {
+ var tests = []string{
+ "# Header 1\n",
+ "Header 1
\n",
+
+ "## Header 2\n",
+ "Header 2
\n",
+
+ "### Header 3\n",
+ "Header 3
\n",
+
+ "#### Header 4\n",
+ "Header 4
\n",
+
+ "##### Header 5\n",
+ "Header 5
\n",
+
+ "###### Header 6\n",
+ "Header 6
\n",
+
+ "####### Header 7\n",
+ "Header 1
\n\n\n
\n",
+
+ "* List\n#Header\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "\n
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_SPACE_HEADERS)
+}
+
+func TestPrefixHeaderIdExtension(t *testing.T) {
+ var tests = []string{
+ "# Header 1 {#someid}\n",
+ "\n
Nested header
Header 1
\n",
+
+ "# Header 1 {#someid} \n",
+ "Header 1
\n",
+
+ "# Header 1 {#someid}\n",
+ "Header 1
\n",
+
+ "# Header 1 {#someid\n",
+ "Header 1 {#someid
\n",
+
+ "# Header 1 {#someid\n",
+ "Header 1 {#someid
\n",
+
+ "# Header 1 {#someid}}\n",
+ "Header 1
\n\nHeader 2
\n",
+
+ "### Header 3 {#someid}\n",
+ "Header 3
\n",
+
+ "#### Header 4 {#someid}\n",
+ "Header 4
\n",
+
+ "##### Header 5 {#someid}\n",
+ "Header 5
\n",
+
+ "###### Header 6 {#someid}\n",
+ "Header 6
\n",
+
+ "####### Header 7 {#someid}\n",
+ "# Header 7
\n",
+
+ "# Header 1 # {#someid}\n",
+ "Header 1
\n",
+
+ "## Header 2 ## {#someid}\n",
+ "Header 2
\n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Header 1
\n\n\n
\n",
+
+ "* List\n#Header {#someid}\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header {#someid}\n",
+ "Header
\n
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_HEADER_IDS)
+}
+
+func TestPrefixHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
+ var tests = []string{
+ "# header 1 {#someid}\n",
+ "\n
Nested header
header 1
\n",
+
+ "## header 2 {#someid}\n",
+ "header 2
\n",
+
+ "### header 3 {#someid}\n",
+ "header 3
\n",
+
+ "#### header 4 {#someid}\n",
+ "header 4
\n",
+
+ "##### header 5 {#someid}\n",
+ "header 5
\n",
+
+ "###### header 6 {#someid}\n",
+ "header 6
\n",
+
+ "####### header 7 {#someid}\n",
+ "# header 7
\n",
+
+ "# header 1 # {#someid}\n",
+ "header 1
\n",
+
+ "## header 2 ## {#someid}\n",
+ "header 2
\n",
+
+ "* List\n# Header {#someid}\n* List\n",
+ "\n
\n",
+
+ "* List\n#Header {#someid}\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header {#someid}\n",
+ "Header
\n
\n",
+ }
+
+ parameters := HtmlRendererParameters{
+ HeaderIDPrefix: "PRE:",
+ HeaderIDSuffix: ":POST",
+ }
+
+ doTestsBlockWithRunner(t, tests, EXTENSION_HEADER_IDS, runnerWithRendererParameters(parameters))
+}
+
+func TestPrefixAutoHeaderIdExtension(t *testing.T) {
+ var tests = []string{
+ "# Header 1\n",
+ "\n
Nested header
Header 1
\n",
+
+ "# Header 1 \n",
+ "Header 1
\n",
+
+ "## Header 2\n",
+ "Header 2
\n",
+
+ "### Header 3\n",
+ "Header 3
\n",
+
+ "#### Header 4\n",
+ "Header 4
\n",
+
+ "##### Header 5\n",
+ "Header 5
\n",
+
+ "###### Header 6\n",
+ "Header 6
\n",
+
+ "####### Header 7\n",
+ "# Header 7
\n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Header 1
\n\n\n
\n",
+
+ "* List\n#Header\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "Header
\n
\n",
+
+ "# Header\n\n# Header\n",
+ "\n
Nested header
Header
\n\nHeader
\n",
+
+ "# Header 1\n\n# Header 1",
+ "Header 1
\n\nHeader 1
\n",
+
+ "# Header\n\n# Header 1\n\n# Header\n\n# Header",
+ "Header
\n\nHeader 1
\n\nHeader
\n\nHeader
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS)
+}
+
+func TestPrefixAutoHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
+ var tests = []string{
+ "# Header 1\n",
+ "Header 1
\n",
+
+ "# Header 1 \n",
+ "Header 1
\n",
+
+ "## Header 2\n",
+ "Header 2
\n",
+
+ "### Header 3\n",
+ "Header 3
\n",
+
+ "#### Header 4\n",
+ "Header 4
\n",
+
+ "##### Header 5\n",
+ "Header 5
\n",
+
+ "###### Header 6\n",
+ "Header 6
\n",
+
+ "####### Header 7\n",
+ "# Header 7
\n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Header 1
\n\n\n
\n",
+
+ "* List\n#Header\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "Header
\n
\n",
+
+ "# Header\n\n# Header\n",
+ "\n
Nested header
Header
\n\nHeader
\n",
+
+ "# Header 1\n\n# Header 1",
+ "Header 1
\n\nHeader 1
\n",
+
+ "# Header\n\n# Header 1\n\n# Header\n\n# Header",
+ "Header
\n\nHeader 1
\n\nHeader
\n\nHeader
\n",
+ }
+
+ parameters := HtmlRendererParameters{
+ HeaderIDPrefix: "PRE:",
+ HeaderIDSuffix: ":POST",
+ }
+
+ doTestsBlockWithRunner(t, tests, EXTENSION_AUTO_HEADER_IDS, runnerWithRendererParameters(parameters))
+}
+
+func TestPrefixMultipleHeaderExtensions(t *testing.T) {
+ var tests = []string{
+ "# Header\n\n# Header {#header}\n\n# Header 1",
+ "Header
\n\nHeader
\n\nHeader 1
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS|EXTENSION_HEADER_IDS)
+}
+
+func TestUnderlineHeaders(t *testing.T) {
+ var tests = []string{
+ "Header 1\n========\n",
+ "Header 1
\n",
+
+ "Header 2\n--------\n",
+ "Header 2
\n",
+
+ "A\n=\n",
+ "A
\n",
+
+ "B\n-\n",
+ "B
\n",
+
+ "Paragraph\nHeader\n=\n",
+ "Header
\n",
+
+ "Header\n===\nParagraph\n",
+ "Header
\n\nHeader
\n\nAnother header
\n",
+
+ " Header\n======\n",
+ "Header
\n",
+
+ " Code\n========\n",
+ "
\n\nCode\n
Header with inline
\n",
+
+ "* List\n * Sublist\n Not a header\n ------\n",
+ "\n
\n",
+
+ "Paragraph\n\n\n\n\nHeader\n===\n",
+ "\n
Header
\n",
+
+ "Trailing space \n==== \n\n",
+ "Trailing space
\n",
+
+ "Trailing spaces\n==== \n\n",
+ "Trailing spaces
\n",
+
+ "Double underline\n=====\n=====\n",
+ "Double underline
\n\nHeader 1
\n",
+
+ "Header 2\n--------\n",
+ "Header 2
\n",
+
+ "A\n=\n",
+ "A
\n",
+
+ "B\n-\n",
+ "B
\n",
+
+ "Paragraph\nHeader\n=\n",
+ "Header
\n",
+
+ "Header\n===\nParagraph\n",
+ "Header
\n\nHeader
\n\nAnother header
\n",
+
+ " Header\n======\n",
+ "Header
\n",
+
+ "Header with *inline*\n=====\n",
+ "Header with inline
\n",
+
+ "Paragraph\n\n\n\n\nHeader\n===\n",
+ "Header
\n",
+
+ "Trailing space \n==== \n\n",
+ "Trailing space
\n",
+
+ "Trailing spaces\n==== \n\n",
+ "Trailing spaces
\n",
+
+ "Double underline\n=====\n=====\n",
+ "Double underline
\n\nHeader
\n\nHeader
\n",
+
+ "Header 1\n========\n\nHeader 1\n========\n",
+ "Header 1
\n\nHeader 1
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS)
+}
+
+func TestHorizontalRule(t *testing.T) {
+ var tests = []string{
+ "-\n",
+ "
\n",
+
+ "----\n",
+ "
\n",
+
+ "*\n",
+ "
\n",
+
+ "****\n",
+ "
\n",
+
+ "_\n",
+ "
\n",
+
+ "____\n",
+ "
\n",
+
+ "-*-\n",
+ "
\n",
+
+ "* * *\n",
+ "
\n",
+
+ "_ _ _\n",
+ "
\n",
+
+ "-----*\n",
+ "
\n",
+
+ "Hello\n***\n",
+ "
\n",
+
+ "---\n***\n___\n",
+ "
\n\n
\n\n
\n",
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestUnorderedList(t *testing.T) {
+ var tests = []string{
+ "* Hello\n",
+ "\n
\n",
+
+ "* Yin\n* Yang\n",
+ "\n
\n",
+
+ "* Ting\n* Bong\n* Goo\n",
+ "\n
\n",
+
+ "* Yin\n\n* Yang\n",
+ "\n
\n",
+
+ "* Ting\n\n* Bong\n* Goo\n",
+ "\n
\n",
+
+ "+ Hello\n",
+ "\n
\n",
+
+ "+ Yin\n+ Yang\n",
+ "\n
\n",
+
+ "+ Ting\n+ Bong\n+ Goo\n",
+ "\n
\n",
+
+ "+ Yin\n\n+ Yang\n",
+ "\n
\n",
+
+ "+ Ting\n\n+ Bong\n+ Goo\n",
+ "\n
\n",
+
+ "- Hello\n",
+ "\n
\n",
+
+ "- Yin\n- Yang\n",
+ "\n
\n",
+
+ "- Ting\n- Bong\n- Goo\n",
+ "\n
\n",
+
+ "- Yin\n\n- Yang\n",
+ "\n
\n",
+
+ "- Ting\n\n- Bong\n- Goo\n",
+ "\n
\n",
+
+ "*Hello\n",
+ "\n
\n",
+
+ "* Hello \n Next line \n",
+ "\n
\n",
+
+ "Paragraph\n* No linebreak\n",
+ "\n
\n",
+
+ "* List\n\n1. Spacer Mixed listing\n",
+ "\n
\n\n\n
\n",
+
+ "* List\n * Nested list\n",
+ "\n
\n",
+
+ "* List\n\n * Nested list\n",
+ "\n
\n
\n",
+
+ "* List\n Second line\n\n + Nested\n",
+ "\n
\n
\n",
+
+ "* List\n + Nested\n\n Continued\n",
+ "\n
\n
\n",
+
+ "* List\n * shallow indent\n",
+ "\n
\n\n\n
\n",
+
+ "* List\n" +
+ " * shallow indent\n" +
+ " * part of second list\n" +
+ " * still second\n" +
+ " * almost there\n" +
+ " * third level\n",
+ "\n
\n" +
+ "
\n",
+
+ "* List\n extra indent, same paragraph\n",
+ "\n" +
+ "
\n" +
+ "
\n
\n",
+
+ "* List\n\n code block\n",
+ "\n
\n",
+
+ "* List\n\n code block with spaces\n",
+ "code block\n
\n
\n",
+
+ "* List\n\n * sublist\n\n normal text\n\n * another sublist\n",
+ " code block with spaces\n
\n
\n",
+
+ `* Foo
+
+ bar
+
+ qux
+`,
+ `\n
\n\n\n
+
+`,
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestFencedCodeBlockWithinList(t *testing.T) {
+ doTestsBlock(t, []string{
+ "* Foo\n\n ```\n bar\n\n qux\n ```\n",
+ `bar
+
+qux
+
+
+`,
+ }, EXTENSION_FENCED_CODE)
+}
+
+func TestOrderedList(t *testing.T) {
+ var tests = []string{
+ "1. Hello\n",
+ "bar
+
+qux
+
\n
\n",
+
+ "1. Yin\n2. Yang\n",
+ "\n
\n",
+
+ "1. Ting\n2. Bong\n3. Goo\n",
+ "\n
\n",
+
+ "1. Yin\n\n2. Yang\n",
+ "\n
\n",
+
+ "1. Ting\n\n2. Bong\n3. Goo\n",
+ "\n
\n",
+
+ "1 Hello\n",
+ "\n
\n",
+
+ "1. Hello \n Next line \n",
+ "\n
\n",
+
+ "Paragraph\n1. No linebreak\n",
+ "\n
\n",
+
+ "1. List\n 1. Nested list\n",
+ "\n
\n",
+
+ "1. List\n\n 1. Nested list\n",
+ "\n
\n
\n",
+
+ "1. List\n Second line\n\n 1. Nested\n",
+ "\n
\n
\n",
+
+ "1. List\n 1. Nested\n\n Continued\n",
+ "\n
\n
\n",
+
+ "1. List\n 1. shallow indent\n",
+ "\n
\n\n\n
\n",
+
+ "1. List\n" +
+ " 1. shallow indent\n" +
+ " 2. part of second list\n" +
+ " 3. still second\n" +
+ " 4. almost there\n" +
+ " 1. third level\n",
+ "\n
\n" +
+ "
\n",
+
+ "1. List\n extra indent, same paragraph\n",
+ "\n" +
+ "
\n" +
+ "
\n
\n",
+
+ "1. List\n\n code block\n",
+ "\n
\n",
+
+ "1. List\n\n code block with spaces\n",
+ "code block\n
\n
\n",
+
+ "1. List\n\n* Spacer Mixed listing\n",
+ " code block with spaces\n
\n
\n\n\n
\n",
+
+ "1. List\n* Mixed listing\n",
+ "\n
\n",
+
+ "1. List\n * Mixted list\n",
+ "\n
\n",
+
+ "1. List\n * Mixed list\n",
+ "\n
\n
\n",
+
+ "* Start with unordered\n 1. Ordered\n",
+ "\n
\n
\n",
+
+ "* Start with unordered\n 1. Ordered\n",
+ "\n
\n
\n",
+
+ "1. numbers\n1. are ignored\n",
+ "\n
\n
\n",
+
+ `1. Foo
+
+ bar
+
+
+
+ qux
+`,
+ `
+
+`,
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestDefinitionList(t *testing.T) {
+ var tests = []string{
+ "Term 1\n: Definition a\n",
+ "bar
+
+
+
+qux
+
\n
\n",
+
+ "Term 1\n: Definition a \n",
+ "\n
\n",
+
+ "Term 1\n: Definition a\n: Definition b\n",
+ "\n
\n",
+
+ "Term 1\n: Definition a\n\nTerm 2\n: Definition b\n",
+ "\n" +
+ "
\n",
+
+ "Term 1\n: Definition a\n\nTerm 2\n: Definition b\n\nTerm 3\n: Definition c\n",
+ "\n" +
+ "
\n",
+
+ "Term 1\n: Definition a\n: Definition b\n\nTerm 2\n: Definition c\n",
+ "\n" +
+ "
\n",
+
+ "Term 1\n\n: Definition a\n\nTerm 2\n\n: Definition b\n",
+ "\n" +
+ "
\n",
+
+ "Term 1\n\n: Definition a\n\n: Definition b\n\nTerm 2\n\n: Definition c\n",
+ "\n" +
+ "
\n",
+
+ "Term 1\n: Definition a\nNext line\n",
+ "\n
\n",
+
+ "Term 1\n: Definition a\n Next line\n",
+ "\n
\n",
+
+ "Term 1\n: Definition a \n Next line \n",
+ "\n
\n",
+
+ "Term 1\n: Definition a\nNext line\n\nTerm 2\n: Definition b",
+ "\n" +
+ "
\n",
+
+ "Term 1\n: Definition a\n",
+ "\n
\n",
+
+ "Term 1\n:Definition a\n",
+ "\n" +
+ "
\n" +
+ "\n\n" +
+ "
\n" +
+ "\n\n" +
+ "
\n" +
+ "\n\n" +
+ "
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_DEFINITION_LISTS)
+}
+
+func TestPreformattedHtml(t *testing.T) {
+ var tests = []string{
+ "\n",
+ "\n",
+
+ "\n
Paragraph\n
Paragraph
\n\nParagraph\n
Paragraph
\n\nParagraph\n
And here?
\n", + + "Paragraph\n\nParagraph
\n\nAnd here?
\n", + } + doTestsBlock(t, tests, 0) +} + +func TestPreformattedHtmlLax(t *testing.T) { + var tests = []string{ + "Paragraph\nParagraph
\n\nParagraph
\n\nParagraph
\n\nAnd here?
\n", + + "Paragraph\n\nParagraph
\n\nAnd here?
\n", + + "Paragraph\nParagraph
\n\nAnd here?
\n", + + "Paragraph\n\nParagraph
\n\nAnd here?
\n", + } + doTestsBlock(t, tests, EXTENSION_LAX_HTML_BLOCKS) +} + +func TestFencedCodeBlock(t *testing.T) { + var tests = []string{ + "``` go\nfunc foo() bool {\n\treturn true;\n}\n```\n", + "func foo() bool {\n\treturn true;\n}\n
\n",
+
+ "``` c\n/* special & char < > \" escaping */\n```\n",
+ "/* special & char < > " escaping */\n
\n",
+
+ "``` c\nno *inline* processing ~~of text~~\n```\n",
+ "no *inline* processing ~~of text~~\n
\n",
+
+ "```\nNo language\n```\n",
+ "No language\n
\n",
+
+ "``` {ocaml}\nlanguage in braces\n```\n",
+ "language in braces\n
\n",
+
+ "``` {ocaml} \nwith extra whitespace\n```\n",
+ "with extra whitespace\n
\n",
+
+ "```{ ocaml }\nwith extra whitespace\n```\n",
+ "with extra whitespace\n
\n",
+
+ "~ ~~ java\nWith whitespace\n~~~\n",
+ "~ ~~ java\nWith whitespace\n~~~
\n", + + "~~\nonly two\n~~\n", + "~~\nonly two\n~~
\n", + + "```` python\nextra\n````\n", + "extra\n
\n",
+
+ "~~~ perl\nthree to start, four to end\n~~~~\n",
+ "~~~ perl\nthree to start, four to end\n~~~~
\n", + + "~~~~ perl\nfour to start, three to end\n~~~\n", + "~~~~ perl\nfour to start, three to end\n~~~
\n", + + "~~~ bash\ntildes\n~~~\n", + "tildes\n
\n",
+
+ "``` lisp\nno ending\n",
+ "``` lisp\nno ending
\n", + + "~~~ lisp\nend with language\n~~~ lisp\n", + "~~~ lisp\nend with language\n~~~ lisp
\n", + + "```\nmismatched begin and end\n~~~\n", + "```\nmismatched begin and end\n~~~
\n", + + "~~~\nmismatched begin and end\n```\n", + "~~~\nmismatched begin and end\n```
\n", + + " ``` oz\nleading spaces\n```\n", + "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ "``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "``` oz\n
\n\nleading spaces\n ```
\n", + + "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n", + "Bla bla
\n\ncode blocks breakup paragraphs\n
\n\nBla Bla
\n", + + "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nAnd some text after a fenced code block", + "Some text before a fenced code block
\n\ncode blocks breakup paragraphs\n
\n\nAnd some text after a fenced code block
\n", + + "`", + "`
\n", + + "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n\n``` oz\nmultiple code blocks work okay\n```\n\nBla Bla\n", + "Bla bla
\n\ncode blocks breakup paragraphs\n
\n\nBla Bla
\n\nmultiple code blocks work okay\n
\n\nBla Bla
\n", + + "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nSome text in between\n``` oz\nmultiple code blocks work okay\n```\nAnd some text after a fenced code block", + "Some text before a fenced code block
\n\ncode blocks breakup paragraphs\n
\n\nSome text in between
\n\nmultiple code blocks work okay\n
\n\nAnd some text after a fenced code block
\n", + + "```\n[]:()\n```\n", + "[]:()\n
\n",
+
+ "```\n[]:()\n[]:)\n[]:(\n[]:x\n[]:testing\n[:testing\n\n[]:\nlinebreak\n[]()\n\n[]:\n[]()\n```",
+ "[]:()\n[]:)\n[]:(\n[]:x\n[]:testing\n[:testing\n\n[]:\nlinebreak\n[]()\n\n[]:\n[]()\n
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_FENCED_CODE)
+}
+
+func TestFencedCodeInsideBlockquotes(t *testing.T) {
+ cat := func(s ...string) string { return strings.Join(s, "\n") }
+ var tests = []string{
+ cat("> ```go",
+ "package moo",
+ "",
+ "```",
+ ""),
+ `++`, + // ------------------------------------------- + cat("> foo", + "> ", + "> ```go", + "package moo", + "```", + "> ", + "> goo.", + ""), + `+package moo + +
++`, + // ------------------------------------------- + cat("> foo", + "> ", + "> quote", + "continues", + "```", + ""), + `foo
+ ++ +package moo +
goo.
+
++`, + // ------------------------------------------- + cat("> foo", + "> ", + "> ```go", + "package moo", + "```", + "> ", + "> goo.", + "> ", + "> ```go", + "package zoo", + "```", + "> ", + "> woo.", + ""), + `foo
+ +quote +continues +` + "```" + `
+
++`, + } + + // These 2 alternative forms of blockquoted fenced code blocks should produce same output. + forms := [2]string{ + cat("> plain quoted text", + "> ```fenced", + "code", + " with leading single space correctly preserved", + "okay", + "```", + "> rest of quoted text"), + cat("> plain quoted text", + "> ```fenced", + "> code", + "> with leading single space correctly preserved", + "> okay", + "> ```", + "> rest of quoted text"), + } + want := `foo
+ ++ +package moo +
goo.
+ ++ +package zoo +
woo.
+
++` + tests = append(tests, forms[0], want) + tests = append(tests, forms[1], want) + + doTestsBlock(t, tests, EXTENSION_FENCED_CODE) +} + +func TestTable(t *testing.T) { + var tests = []string{ + "a | b\n---|---\nc | d\n", + "plain quoted text
+ ++ +code + with leading single space correctly preserved +okay +
rest of quoted text
+
a | \nb | \n
---|---|
c | \nd | \n
a | b\n---|--\nc | d
\n", + + "|a|b|c|d|\n|----|----|----|---|\n|e|f|g|h|\n", + "a | \nb | \nc | \nd | \n
---|---|---|---|
e | \nf | \ng | \nh | \n
a | \nb | \nc | \nd | \n
---|---|---|---|
e | \nf | \ng | \nh | \n
a | \nb | \nc | \n
---|---|---|
d | \ne | \nf | \n
g | \nh | \n\n |
i | \nj | \nk | \n
n | \no | \np | \n
a | \nb | \nc | \n
---|---|---|
d | \ne | \nf | \n
a | \nb | \n" + + "c | \nd | \n
---|---|---|---|
e | \nf | \n" + + "g | \nh | \n
a | \nb | \nc | \n
---|
a | \nb | \nc | \nd | \ne | \n
---|---|---|---|---|
f | \ng | \nh | \ni | \nj | \n
a | \nb|c | \nd | \n
---|---|---|
f | \ng|h | \ni | \n
Yin
Yang
Ting
Bong
Goo
Yin
Yang
Ting
Bong
Goo
Yin
Yang
Ting
Bong
Goo
*Hello
\n", + + "* Hello \n", + "Paragraph
\n\nParagraph
\n\nList
\n\nList\nSecond line
\n\nList
\n\nContinued
List
\n\ncode block\n
List
\n\n code block with spaces\n
List
\n\nnormal text
\n\nYin
Yang
Ting
Bong
Goo
1 Hello
\n", + + "1.Hello\n", + "1.Hello
\n", + + "1. Hello \n", + "Paragraph
\n\nParagraph
\n\nList
\n\nList\nSecond line
\n\nList
\n\nContinued
List
\n\ncode block\n
List
\n\n code block with spaces\n
func foo() bool {\n\treturn true;\n}\n
\n",
+
+ "``` c\n/* special & char < > \" escaping */\n```\n",
+ "/* special & char < > " escaping */\n
\n",
+
+ "``` c\nno *inline* processing ~~of text~~\n```\n",
+ "no *inline* processing ~~of text~~\n
\n",
+
+ "```\nNo language\n```\n",
+ "No language\n
\n",
+
+ "``` {ocaml}\nlanguage in braces\n```\n",
+ "language in braces\n
\n",
+
+ "``` {ocaml} \nwith extra whitespace\n```\n",
+ "with extra whitespace\n
\n",
+
+ "```{ ocaml }\nwith extra whitespace\n```\n",
+ "with extra whitespace\n
\n",
+
+ "~ ~~ java\nWith whitespace\n~~~\n",
+ "~ ~~ java\nWith whitespace\n~~~
\n", + + "~~\nonly two\n~~\n", + "~~\nonly two\n~~
\n", + + "```` python\nextra\n````\n", + "extra\n
\n",
+
+ "~~~ perl\nthree to start, four to end\n~~~~\n",
+ "~~~ perl\nthree to start, four to end\n~~~~
\n", + + "~~~~ perl\nfour to start, three to end\n~~~\n", + "~~~~ perl\nfour to start, three to end\n~~~
\n", + + "~~~ bash\ntildes\n~~~\n", + "tildes\n
\n",
+
+ "``` lisp\nno ending\n",
+ "``` lisp\nno ending
\n", + + "~~~ lisp\nend with language\n~~~ lisp\n", + "~~~ lisp\nend with language\n~~~ lisp
\n", + + "```\nmismatched begin and end\n~~~\n", + "```\nmismatched begin and end\n~~~
\n", + + "~~~\nmismatched begin and end\n```\n", + "~~~\nmismatched begin and end\n```
\n", + + " ``` oz\nleading spaces\n```\n", + "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ "``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "``` oz\n
\n\nleading spaces
\n\n```\n
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_FENCED_CODE|EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK)
+}
+
+func TestTitleBlock_EXTENSION_TITLEBLOCK(t *testing.T) {
+ var tests = []string{
+ "% Some title\n" +
+ "% Another title line\n" +
+ "% Yep, more here too\n",
+ "Some text
\n\n\n", + + "Some text\n\n\n", + "Some text
\n\n\n", + + "Some text\n\n\n", + "Some text
\n\n\n", + } + doTestsBlock(t, tests, 0) +} + +func TestCDATA(t *testing.T) { + var tests = []string{ + "Some text\n\n\n", + "Some text
\n\n\n", + + "CDATA ]]\n\n\n", + "CDATA ]]
\n\n\n", + + "CDATA >\n\n]]>\n", + "CDATA >
\n\n]]>\n", + + "Lots of text\n\nLots of text
\n\n<![CDATA[foo]]>\n
\n",
+
+ "\n",
+ "\n",
+
+ ` def func():
+> pass
+]]>
+`,
+ ` def func():
+> pass
+]]>
+`,
+ }, EXTENSION_FENCED_CODE)
+}
+
+func TestIsFenceLine(t *testing.T) {
+ tests := []struct {
+ data []byte
+ syntaxRequested bool
+ newlineOptional bool
+ wantEnd int
+ wantMarker string
+ wantSyntax string
+ }{
+ {
+ data: []byte("```"),
+ wantEnd: 0,
+ },
+ {
+ data: []byte("```\nstuff here\n"),
+ wantEnd: 4,
+ wantMarker: "```",
+ },
+ {
+ data: []byte("```\nstuff here\n"),
+ syntaxRequested: true,
+ wantEnd: 4,
+ wantMarker: "```",
+ },
+ {
+ data: []byte("stuff here\n```\n"),
+ wantEnd: 0,
+ },
+ {
+ data: []byte("```"),
+ newlineOptional: true,
+ wantEnd: 3,
+ wantMarker: "```",
+ },
+ {
+ data: []byte("```"),
+ syntaxRequested: true,
+ newlineOptional: true,
+ wantEnd: 3,
+ wantMarker: "```",
+ },
+ {
+ data: []byte("``` go"),
+ syntaxRequested: true,
+ newlineOptional: true,
+ wantEnd: 6,
+ wantMarker: "```",
+ wantSyntax: "go",
+ },
+ }
+
+ for _, test := range tests {
+ var syntax *string
+ if test.syntaxRequested {
+ syntax = new(string)
+ }
+ end, marker := isFenceLine(test.data, syntax, "```", test.newlineOptional)
+ if got, want := end, test.wantEnd; got != want {
+ t.Errorf("got end %v, want %v", got, want)
+ }
+ if got, want := marker, test.wantMarker; got != want {
+ t.Errorf("got marker %q, want %q", got, want)
+ }
+ if test.syntaxRequested {
+ if got, want := *syntax, test.wantSyntax; got != want {
+ t.Errorf("got syntax %q, want %q", got, want)
+ }
+ }
+ }
+}
+
+func TestJoinLines(t *testing.T) {
+ input := `# 标题
+
+第一
+行文字。
+
+第
+二
+行文字。
+`
+ result := `第一行文字。
+ +第二行文字。
+` + opt := Options{Extensions: commonExtensions | EXTENSION_JOIN_LINES} + renderer := HtmlRenderer(commonHtmlFlags, "", "") + output := MarkdownOptions([]byte(input), renderer, opt) + + if string(output) != result { + t.Error("output dose not match.") + } +} + +func TestSanitizedAnchorName(t *testing.T) { + tests := []struct { + text string + want string + }{ + { + text: "This is a header", + want: "this-is-a-header", + }, + { + text: "This is also a header", + want: "this-is-also-a-header", + }, + { + text: "main.go", + want: "main-go", + }, + { + text: "Article 123", + want: "article-123", + }, + { + text: "<- Let's try this, shall we?", + want: "let-s-try-this-shall-we", + }, + { + text: " ", + want: "", + }, + { + text: "Hello, 世界", + want: "hello-世界", + }, + } + for _, test := range tests { + if got := SanitizedAnchorName(test.text); got != test.want { + t.Errorf("SanitizedAnchorName(%q):\ngot %q\nwant %q", test.text, got, test.want) + } + } +} diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go new file mode 100644 index 00000000..9656c42a --- /dev/null +++ b/vendor/github.com/russross/blackfriday/doc.go @@ -0,0 +1,32 @@ +// Package blackfriday is a Markdown processor. +// +// It translates plain text with simple formatting rules into HTML or LaTeX. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. This algorithm is used to create +// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The +// algorithm is specified below, so that other packages can create +// compatible anchor names and links to those anchors. +// +// The algorithm iterates over the input text, interpreted as UTF-8, +// one Unicode code point (rune) at a time. All runes that are letters (category L) +// or numbers (category N) are considered valid characters. They are mapped to +// lower case, and included in the output. 
All other runes are considered +// invalid characters. Invalid characters that preceed the first valid character, +// as well as invalid character that follow the last valid character +// are dropped completely. All other sequences of invalid characters +// between two valid characters are replaced with a single dash character '-'. +// +// SanitizedAnchorName exposes this functionality, and can be used to +// create compatible links to the anchor names generated by blackfriday. +// This algorithm is also implemented in a small standalone package at +// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients +// that want a small package and don't need full functionality of blackfriday. +package blackfriday + +// NOTE: Keep Sanitized Anchor Name algorithm in sync with package +// github.com/shurcooL/sanitized_anchor_name. +// Otherwise, users of sanitized_anchor_name will get anchor names +// that are incompatible with those generated by blackfriday. diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go new file mode 100644 index 00000000..74e67ee8 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/html.go @@ -0,0 +1,949 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross