X-Git-Url: http://git.samba.org/samba.git/?p=jelmer%2Fdulwich-libgit2.git;a=blobdiff_plain;f=dulwich%2Fobjects.py;h=e4a53c14ab920f0ce556131ad4b3d820c2a26f01;hp=44b1eb05d780f1dfd6d3da36c97acf3be7ccc27c;hb=f4341b4283f82dc3aeaff1444e709da21dd98f2e;hpb=1872cb575175fb924497fb272605450dcdf05134

diff --git a/dulwich/objects.py b/dulwich/objects.py
index 44b1eb0..e4a53c1 100644
--- a/dulwich/objects.py
+++ b/dulwich/objects.py
@@ -1,17 +1,17 @@
 # objects.py -- Access to base git objects
 # Copyright (C) 2007 James Westby
-# Copyright (C) 2008 Jelmer Vernooij
-# 
+# Copyright (C) 2008-2009 Jelmer Vernooij
+#
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; version 2
 # of the License or (at your option) a later version of the License.
-# 
+#
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
-# 
+#
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
@@ -21,9 +21,14 @@
 
 """Access to base git objects."""
 
+import binascii
+from cStringIO import (
+    StringIO,
+    )
 import mmap
 import os
-import sha
+import stat
+import time
 import zlib
 
 from dulwich.errors import (
@@ -31,6 +36,10 @@ from dulwich.errors import (
     NotCommitError,
     NotTreeError,
     )
+from dulwich.file import GitFile
+from dulwich.misc import (
+    make_sha,
+    )
 
 BLOB_ID = "blob"
 TAG_ID = "tag"
@@ -42,6 +51,12 @@ COMMITTER_ID = "committer"
 OBJECT_ID = "object"
 TYPE_ID = "type"
 TAGGER_ID = "tagger"
+ENCODING_ID = "encoding"
+
+S_IFGITLINK = 0160000
+
+def S_ISGITLINK(m):
+    return (stat.S_IFMT(m) == S_IFGITLINK)
 
 def _decompress(string):
     dcomp = zlib.decompressobj()
@@ -52,7 +67,7 @@
 
 def sha_to_hex(sha):
     """Takes a string and returns the hex of the sha within"""
-    hexsha = "".join(["%02x" % ord(c) for c in sha])
+    hexsha = binascii.hexlify(sha)
     assert len(hexsha) == 40, "Incorrect length of sha1 string: %d" % hexsha
     return hexsha
 
@@ -60,12 +75,23 @@
 def hex_to_sha(hex):
     """Takes a hex sha and returns a binary sha"""
     assert len(hex) == 40, "Incorrent length of hexsha: %s" % hex
-    return ''.join([chr(int(hex[i:i+2], 16)) for i in xrange(0, len(hex), 2)])
+    return binascii.unhexlify(hex)
+
+
+def serializable_property(name, docstring=None):
+    def set(obj, value):
+        obj._ensure_parsed()
+        setattr(obj, "_"+name, value)
+        obj._needs_serialization = True
+    def get(obj):
+        obj._ensure_parsed()
+        return getattr(obj, "_"+name)
+    return property(get, set, doc=docstring)
 
 
 class ShaFile(object):
     """A git SHA file."""
-    
+
     @classmethod
     def _parse_legacy_object(cls, map):
         """Parse a legacy object, creating it and setting object._text"""
@@ -83,22 +109,46 @@
         i = 0
         while text[0] >= '0' and text[0] <= '9':
             if i > 0 and size == 0:
-                assert False, "Size is not in canonical format"
+                raise AssertionError("Size is not in canonical format")
             size = (size * 10) + int(text[0])
             text = text[1:]
             i += 1
         object._size = size
         assert text[0] == "\0", "Size not followed by null"
         text = text[1:]
-        object._text = text
+        object.set_raw_string(text)
         return object
 
     def as_legacy_object(self):
-        return zlib.compress("%s %d\0%s" % (self._type, len(self._text), self._text))
-    
+        text = self.as_raw_string()
+        return zlib.compress("%s %d\0%s" % (self._type, len(text), text))
+
     def as_raw_string(self):
-        return self._num_type, self._text
-    
+        if self._needs_serialization:
+            self.serialize()
+        return self._text
+
+    def __str__(self):
+        return self.as_raw_string()
+
+    def __hash__(self):
+        return hash(self.id)
+
+    def as_pretty_string(self):
+        return self.as_raw_string()
+
+    def _ensure_parsed(self):
+        if self._needs_parsing:
+            self._parse_text()
+
+    def set_raw_string(self, text):
+        if type(text) != str:
+            raise TypeError(text)
+        self._text = text
+        self._sha = None
+        self._needs_parsing = True
+        self._needs_serialization = False
+
     @classmethod
     def _parse_object(cls, map):
         """Parse a new style object , creating it and setting object._text"""
@@ -114,9 +164,9 @@
             byte = ord(map[used])
             used += 1
         raw = map[used:]
-        object._text = _decompress(raw)
+        object.set_raw_string(_decompress(raw))
         return object
-    
+
     @classmethod
     def _parse_file(cls, map):
         word = (ord(map[0]) << 8) + ord(map[1])
@@ -124,68 +174,90 @@
             return cls._parse_legacy_object(map)
         else:
             return cls._parse_object(map)
-    
+
     def __init__(self):
         """Don't call this directly"""
-        
+        self._sha = None
+
     def _parse_text(self):
         """For subclasses to do initialisation time parsing"""
-        
+
     @classmethod
     def from_file(cls, filename):
         """Get the contents of a SHA file on disk"""
         size = os.path.getsize(filename)
-        f = open(filename, 'rb')
+        f = GitFile(filename, 'rb')
        try:
             map = mmap.mmap(f.fileno(), size, access=mmap.ACCESS_READ)
             shafile = cls._parse_file(map)
-            shafile._parse_text()
             return shafile
         finally:
             f.close()
-    
+
     @classmethod
     def from_raw_string(cls, type, string):
         """Creates an object of the indicated type from the raw string given.
-    
+
         Type is the numeric type of an object. String is the raw uncompressed
         contents.
         """
         real_class = num_type_map[type]
         obj = real_class()
-        obj._num_type = type
-        obj._text = string
-        obj._parse_text()
+        obj.type = type
+        obj.set_raw_string(string)
         return obj
-    
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a blob from a string."""
+        shafile = cls()
+        shafile.set_raw_string(string)
+        return shafile
+
+    def _raw_length(self):
+        """Returns the length of the raw string of this object."""
+        return len(self.as_raw_string())
+
     def _header(self):
-        return "%s %lu\0" % (self._type, len(self._text))
-    
+        return "%s %lu\0" % (self._type, self._raw_length())
+
+    def _make_sha(self):
+        ret = make_sha()
+        ret.update(self._header())
+        ret.update(self.as_raw_string())
+        return ret
+
     def sha(self):
         """The SHA1 object that is the name of this object."""
-        ressha = sha.new()
-        ressha.update(self._header())
-        ressha.update(self._text)
-        return ressha
-    
+        if self._needs_serialization or self._sha is None:
+            self._sha = self._make_sha()
+        return self._sha
+
     @property
     def id(self):
         return self.sha().hexdigest()
-    
-    @property
-    def type(self):
+
+    def get_type(self):
         return self._num_type
-    
+
+    def set_type(self, type):
+        self._num_type = type
+
+    type = property(get_type, set_type)
+
     def __repr__(self):
         return "<%s %s>" % (self.__class__.__name__, self.id)
-    
+
+    def __ne__(self, other):
+        return self.id != other.id
+
     def __eq__(self, other):
         """Return true id the sha of the two objects match.
-    
+
        The __le__ etc methods aren't overriden as they make no sense,
         certainly at this level.
         """
-        return self.sha().digest() == other.sha().digest()
+        return self.id == other.id
 
 
 class Blob(ShaFile):
     """A Git Blob object."""
@@ -194,11 +266,56 @@
     _type = BLOB_ID
     _num_type = 3
 
-    @property
-    def data(self):
-        """The text contained within the blob object."""
+    def __init__(self):
+        super(Blob, self).__init__()
+        self._chunked = []
+        self._text = ""
+        self._needs_parsing = False
+        self._needs_serialization = False
+
+    def _get_data(self):
+        if self._needs_serialization:
+            self.serialize()
         return self._text
 
+    def _set_data(self, data):
+        self._text = data
+        self._needs_parsing = True
+        self._needs_serialization = False
+
+    data = property(_get_data, _set_data,
+        "The text contained within the blob object.")
+
+    def _get_chunked(self):
+        self._ensure_parsed()
+        return self._chunked
+
+    def _set_chunked(self, chunks):
+        self._chunked = chunks
+        self._needs_serialization = True
+
+    chunked = property(_get_chunked, _set_chunked,
+        "The text within the blob object, as chunks (not necessarily lines).")
+
+    def _parse_text(self):
+        self._chunked = [self._text]
+
+    def serialize(self):
+        self._text = "".join(self._chunked)
+
+    def _raw_length(self):
+        ret = 0
+        for chunk in self.chunked:
+            ret += len(chunk)
+        return ret
+
+    def _make_sha(self):
+        ret = make_sha()
+        ret.update(self._header())
+        for chunk in self._chunked:
+            ret.update(chunk)
+        return ret
+
     @classmethod
     def from_file(cls, filename):
         blob = ShaFile.from_file(filename)
@@ -206,13 +323,6 @@
             raise NotBlobError(filename)
         return blob
 
-    @classmethod
-    def from_string(cls, string):
-        """Create a blob from a string."""
-        shafile = cls()
-        shafile._text = string
-        return shafile
-
 
 class Tag(ShaFile):
     """A Git Tag object."""
@@ -220,6 +330,11 @@
     _type = TAG_ID
     _num_type = 4
 
+    def __init__(self):
+        super(Tag, self).__init__()
+        self._needs_parsing = False
+        self._needs_serialization = True
+
     @classmethod
     def from_file(cls, filename):
         blob = ShaFile.from_file(filename)
@@ -231,130 +346,130 @@
     def from_string(cls, string):
         """Create a blob from a string."""
         shafile = cls()
-        shafile._text = string
+        shafile.set_raw_string(string)
         return shafile
 
+    def serialize(self):
+        f = StringIO()
+        f.write("%s %s\n" % (OBJECT_ID, self._object_sha))
+        f.write("%s %s\n" % (TYPE_ID, num_type_map[self._object_type]._type))
+        f.write("%s %s\n" % (TAG_ID, self._name))
+        if self._tagger:
+            if self._tag_time is None:
+                f.write("%s %s\n" % (TAGGER_ID, self._tagger))
+            else:
+                f.write("%s %s %d %s\n" % (TAGGER_ID, self._tagger, self._tag_time, format_timezone(self._tag_timezone)))
+        f.write("\n") # To close headers
+        f.write(self._message)
+        self._text = f.getvalue()
+        self._needs_serialization = False
+
     def _parse_text(self):
         """Grab the metadata attached to the tag"""
-        text = self._text
-        count = 0
-        assert text.startswith(OBJECT_ID), "Invalid tag object, " \
-            "must start with %s" % OBJECT_ID
-        count += len(OBJECT_ID)
-        assert text[count] == ' ', "Invalid tag object, " \
-            "%s must be followed by space not %s" % (OBJECT_ID, text[count])
-        count += 1
-        self._object_sha = text[count:count+40]
-        count += 40
-        assert text[count] == '\n', "Invalid tag object, " \
-            "%s sha must be followed by newline" % OBJECT_ID
-        count += 1
-        assert text[count:].startswith(TYPE_ID), "Invalid tag object, " \
-            "%s sha must be followed by %s" % (OBJECT_ID, TYPE_ID)
-        count += len(TYPE_ID)
-        assert text[count] == ' ', "Invalid tag object, " \
-            "%s must be followed by space not %s" % (TAG_ID, text[count])
-        count += 1
-        self._object_type = ""
-        while text[count] != '\n':
-            self._object_type += text[count]
-            count += 1
-        count += 1
-        assert self._object_type in (COMMIT_ID, BLOB_ID, TREE_ID, TAG_ID), "Invalid tag object, " \
-            "unexpected object type %s" % self._object_type
-        self._object_type = type_map[self._object_type]
-
-        assert text[count:].startswith(TAG_ID), "Invalid tag object, " \
-            "object type must be followed by %s" % (TAG_ID)
-        count += len(TAG_ID)
-        assert text[count] == ' ', "Invalid tag object, " \
-            "%s must be followed by space not %s" % (TAG_ID, text[count])
-        count += 1
-        self._name = ""
-        while text[count] != '\n':
-            self._name += text[count]
-            count += 1
-        count += 1
-
-        assert text[count:].startswith(TAGGER_ID), "Invalid tag object, " \
-            "%s must be followed by %s" % (TAG_ID, TAGGER_ID)
-        count += len(TAGGER_ID)
-        assert text[count] == ' ', "Invalid tag object, " \
-            "%s must be followed by space not %s" % (TAGGER_ID, text[count])
-        count += 1
-        self._tagger = ""
-        while text[count] != '>':
-            assert text[count] != '\n', "Malformed tagger information"
-            self._tagger += text[count]
-            count += 1
-        self._tagger += text[count]
-        count += 1
-        assert text[count] == ' ', "Invalid tag object, " \
-            "tagger information must be followed by space not %s" % text[count]
-        count += 1
-        self._tag_time = int(text[count:count+10])
-        while text[count] != '\n':
-            count += 1
-        count += 1
-        assert text[count] == '\n', "There must be a new line after the headers"
-        count += 1
-        self._message = text[count:]
-
-    @property
-    def object(self):
+        self._tagger = None
+        f = StringIO(self._text)
+        for l in f:
+            l = l.rstrip("\n")
+            if l == "":
+                break # empty line indicates end of headers
+            (field, value) = l.split(" ", 1)
+            if field == OBJECT_ID:
+                self._object_sha = value
+            elif field == TYPE_ID:
+                self._object_type = type_map[value]
+            elif field == TAG_ID:
+                self._name = value
+            elif field == TAGGER_ID:
+                try:
+                    sep = value.index("> ")
+                except ValueError:
+                    self._tagger = value
+                    self._tag_time = None
+                    self._tag_timezone = None
+                else:
+                    self._tagger = value[0:sep+1]
+                    (timetext, timezonetext) = value[sep+2:].rsplit(" ", 1)
+                    try:
+                        self._tag_time = int(timetext)
+                    except ValueError: #Not a unix timestamp
+                        self._tag_time = time.strptime(timetext)
+                    self._tag_timezone = parse_timezone(timezonetext)
+            else:
+                raise AssertionError("Unknown field %s" % field)
+        self._message = f.read()
+        self._needs_parsing = False
+
+    def _get_object(self):
         """Returns the object pointed by this tag, represented as a tuple(type, sha)"""
+        self._ensure_parsed()
         return (self._object_type, self._object_sha)
 
-    @property
-    def name(self):
-        """Returns the name of this tag"""
-        return self._name
-
-    @property
-    def tagger(self):
-        """Returns the name of the person who created this tag"""
-        return self._tagger
-
-    @property
-    def tag_time(self):
-        """Returns the creation timestamp of the tag.
+    def _set_object(self, value):
+        self._ensure_parsed()
+        (self._object_type, self._object_sha) = value
+        self._needs_serialization = True
 
-        Returns it as the number of seconds since the epoch"""
-        return self._tag_time
+    object = property(_get_object, _set_object)
 
-    @property
-    def message(self):
-        """Returns the message attached to this tag"""
-        return self._message
+    name = serializable_property("name", "The name of this tag")
+    tagger = serializable_property("tagger",
+        "Returns the name of the person who created this tag")
+    tag_time = serializable_property("tag_time",
+        "The creation timestamp of the tag.  As the number of seconds since the epoch")
+    tag_timezone = serializable_property("tag_timezone",
+        "The timezone that tag_time is in.")
+    message = serializable_property("message", "The message attached to this tag")
 
 
 def parse_tree(text):
-    ret = []
+    """Parse a tree text.
+
+    :param text: Serialized text to parse
+    :return: Dictionary with names as keys, (mode, sha) tuples as values
+    """
+    ret = {}
     count = 0
-    while count < len(text):
-        mode = 0
-        chr = text[count]
-        while chr != ' ':
-            assert chr >= '0' and chr <= '7', "%s is not a valid mode char" % chr
-            mode = (mode << 3) + (ord(chr) - ord('0'))
-            count += 1
-            chr = text[count]
-        count += 1
-        chr = text[count]
-        name = ''
-        while chr != '\0':
-            name += chr
-            count += 1
-            chr = text[count]
-        count += 1
-        chr = text[count]
-        sha = text[count:count+20]
-        hexsha = sha_to_hex(sha)
-        ret.append((mode, name, hexsha))
-        count = count + 20
+    l = len(text)
+    while count < l:
+        mode_end = text.index(' ', count)
+        mode = int(text[count:mode_end], 8)
+        name_end = text.index('\0', mode_end)
+        name = text[mode_end+1:name_end]
+        count = name_end+21
+        sha = text[name_end+1:count]
+        ret[name] = (mode, sha_to_hex(sha))
     return ret
 
 
+def serialize_tree(items):
+    """Serialize the items in a tree to a text.
+
+    :param items: Sorted iterable over (name, mode, sha) tuples
+    :return: Serialized tree text
+    """
+    f = StringIO()
+    for name, mode, hexsha in items:
+        f.write("%04o %s\0%s" % (mode, name, hex_to_sha(hexsha)))
+    return f.getvalue()
+
+
+def sorted_tree_items(entries):
+    """Iterate over a tree entries dictionary in the order in which
+    the items would be serialized.
+
+    :param entries: Dictionary mapping names to (mode, sha) tuples
+    :return: Iterator over (name, mode, sha)
+    """
+    def cmp_entry((name1, value1), (name2, value2)):
+        if stat.S_ISDIR(value1[0]):
+            name1 += "/"
+        if stat.S_ISDIR(value2[0]):
+            name2 += "/"
+        return cmp(name1, name2)
+    for name, entry in sorted(entries.iteritems(), cmp=cmp_entry):
+        yield name, entry[0], entry[1]
+
+
 class Tree(ShaFile):
     """A Git tree object"""
@@ -362,7 +477,10 @@
     _type = TREE_ID
     _num_type = 2
 
     def __init__(self):
+        super(Tree, self).__init__()
         self._entries = {}
+        self._needs_parsing = False
+        self._needs_serialization = True
 
     @classmethod
     def from_file(cls, filename):
@@ -372,39 +490,89 @@
         return tree
 
     def __contains__(self, name):
+        self._ensure_parsed()
         return name in self._entries
 
     def __getitem__(self, name):
+        self._ensure_parsed()
        return self._entries[name]
 
     def __setitem__(self, name, value):
         assert isinstance(value, tuple)
         assert len(value) == 2
+        self._ensure_parsed()
         self._entries[name] = value
+        self._needs_serialization = True
 
     def __delitem__(self, name):
+        self._ensure_parsed()
         del self._entries[name]
+        self._needs_serialization = True
+
+    def __len__(self):
+        self._ensure_parsed()
+        return len(self._entries)
 
     def add(self, mode, name, hexsha):
+        assert type(mode) == int
+        assert type(name) == str
+        assert type(hexsha) == str
+        self._ensure_parsed()
         self._entries[name] = mode, hexsha
+        self._needs_serialization = True
 
     def entries(self):
         """Return a list of tuples describing the tree entries"""
-        # The order of this is different from iteritems() for historical reasons
-        return [(mode, name, hexsha) for (name, mode, hexsha) in self.iteritems()]
+        self._ensure_parsed()
+        # The order of this is different from iteritems() for historical
+        # reasons
+        return [
+            (mode, name, hexsha) for (name, mode, hexsha) in self.iteritems()]
 
     def iteritems(self):
-        for name in sorted(self._entries.keys()):
-            yield name, self._entries[name][0], self._entries[name][1]
+        """Iterate over all entries in the order in which they would be
+        serialized.
+
+        :return: Iterator over (name, mode, sha) tuples
+        """
+        self._ensure_parsed()
+        return sorted_tree_items(self._entries)
 
     def _parse_text(self):
         """Grab the entries in the tree"""
         self._entries = parse_tree(self._text)
+        self._needs_parsing = False
 
     def serialize(self):
-        self._text = ""
+        self._text = serialize_tree(self.iteritems())
+        self._needs_serialization = False
+
+    def as_pretty_string(self):
+        text = ""
         for name, mode, hexsha in self.iteritems():
-            self._text += "%04o %s\0%s" % (mode, name, hex_to_sha(hexsha))
+            if mode & stat.S_IFDIR:
+                kind = "tree"
+            else:
+                kind = "blob"
+            text += "%04o %s %s\t%s\n" % (mode, kind, hexsha, name)
+        return text
+
+
+def parse_timezone(text):
+    offset = int(text)
+    signum = (offset < 0) and -1 or 1
+    offset = abs(offset)
+    hours = int(offset / 100)
+    minutes = (offset % 100)
+    return signum * (hours * 3600 + minutes * 60)
+
+
+def format_timezone(offset):
+    if offset % 60 != 0:
+        raise ValueError("Unable to handle non-minute offset.")
+    sign = (offset < 0) and '-' or '+'
+    offset = abs(offset)
+    return '%c%02d%02d' % (sign, offset / 3600, (offset / 60) % 60)
 
 
 class Commit(ShaFile):
@@ -414,7 +582,12 @@
     _num_type = 1
 
     def __init__(self):
+        super(Commit, self).__init__()
         self._parents = []
+        self._encoding = None
+        self._needs_parsing = False
+        self._needs_serialization = True
+        self._extra = {}
 
     @classmethod
     def from_file(cls, filename):
@@ -424,147 +597,98 @@
         return commit
 
     def _parse_text(self):
-        text = self._text
-        count = 0
-        assert text.startswith(TREE_ID), "Invalid commit object, " \
-            "must start with %s" % TREE_ID
-        count += len(TREE_ID)
-        assert text[count] == ' ', "Invalid commit object, " \
-            "%s must be followed by space not %s" % (TREE_ID, text[count])
-        count += 1
-        self._tree = text[count:count+40]
-        count = count + 40
-        assert text[count] == "\n", "Invalid commit object, " \
-            "tree sha must be followed by newline"
-        count += 1
         self._parents = []
-        while text[count:].startswith(PARENT_ID):
-            count += len(PARENT_ID)
-            assert text[count] == ' ', "Invalid commit object, " \
-                "%s must be followed by space not %s" % (PARENT_ID, text[count])
-            count += 1
-            self._parents.append(text[count:count+40])
-            count += 40
-            assert text[count] == "\n", "Invalid commit object, " \
-                "parent sha must be followed by newline"
-            count += 1
+        self._extra = []
         self._author = None
-        if text[count:].startswith(AUTHOR_ID):
-            count += len(AUTHOR_ID)
-            assert text[count] == ' ', "Invalid commit object, " \
-                "%s must be followed by space not %s" % (AUTHOR_ID, text[count])
-            count += 1
-            self._author = ''
-            while text[count] != '>':
-                assert text[count] != '\n', "Malformed author information"
-                self._author += text[count]
-                count += 1
-            self._author += text[count]
-            count += 1
-            assert text[count] == ' ', "Invalid commit object, " \
-                "author information must be followed by space not %s" % text[count]
-            count += 1
-            self._author_time = int(text[count:count+10])
-            while text[count] != ' ':
-                assert text[count] != '\n', "Malformed author information"
-                count += 1
-            self._author_timezone = int(text[count:count+6])
-            count += 1
-            while text[count] != '\n':
-                count += 1
-            count += 1
-        self._committer = None
-        if text[count:].startswith(COMMITTER_ID):
-            count += len(COMMITTER_ID)
-            assert text[count] == ' ', "Invalid commit object, " \
-                "%s must be followed by space not %s" % (COMMITTER_ID, text[count])
-            count += 1
-            self._committer = ''
-            while text[count] != '>':
-                assert text[count] != '\n', "Malformed committer information"
-                self._committer += text[count]
-                count += 1
-            self._committer += text[count]
-            count += 1
-            assert text[count] == ' ', "Invalid commit object, " \
-                "commiter information must be followed by space not %s" % text[count]
-            count += 1
-            self._commit_time = int(text[count:count+10])
-            while text[count] != ' ':
-                assert text[count] != '\n', "Malformed committer information"
-                count += 1
-            self._commit_timezone = int(text[count:count+6])
-            count += 1
-            while text[count] != '\n':
-                count += 1
-            count += 1
-        assert text[count] == '\n', "There must be a new line after the headers"
-        count += 1
-        # XXX: There can be an encoding field.
-        self._message = text[count:]
+        f = StringIO(self._text)
+        for l in f:
+            l = l.rstrip("\n")
+            if l == "":
+                # Empty line indicates end of headers
+                break
+            (field, value) = l.split(" ", 1)
+            if field == TREE_ID:
+                self._tree = value
+            elif field == PARENT_ID:
+                self._parents.append(value)
+            elif field == AUTHOR_ID:
+                self._author, timetext, timezonetext = value.rsplit(" ", 2)
+                self._author_time = int(timetext)
+                self._author_timezone = parse_timezone(timezonetext)
+            elif field == COMMITTER_ID:
+                self._committer, timetext, timezonetext = value.rsplit(" ", 2)
+                self._commit_time = int(timetext)
+                self._commit_timezone = parse_timezone(timezonetext)
+            elif field == ENCODING_ID:
+                self._encoding = value
+            else:
+                self._extra.append((field, value))
+        self._message = f.read()
+        self._needs_parsing = False
 
     def serialize(self):
-        self._text = ""
-        self._text += "%s %s\n" % (TREE_ID, self._tree)
+        f = StringIO()
+        f.write("%s %s\n" % (TREE_ID, self._tree))
         for p in self._parents:
-            self._text += "%s %s\n" % (PARENT_ID, p)
-        self._text += "%s %s %s %+05d\n" % (AUTHOR_ID, self._author, str(self._author_time), self._author_timezone)
-        self._text += "%s %s %s %+05d\n" % (COMMITTER_ID, self._committer, str(self._commit_time), self._commit_timezone)
-        self._text += "\n" # There must be a new line after the headers
-        self._text += self._message
-
-    @property
-    def tree(self):
-        """Returns the tree that is the state of this commit"""
-        return self._tree
-
-    @property
-    def parents(self):
+            f.write("%s %s\n" % (PARENT_ID, p))
+        f.write("%s %s %s %s\n" % (AUTHOR_ID, self._author, str(self._author_time), format_timezone(self._author_timezone)))
+        f.write("%s %s %s %s\n" % (COMMITTER_ID, self._committer, str(self._commit_time), format_timezone(self._commit_timezone)))
+        if self.encoding:
+            f.write("%s %s\n" % (ENCODING_ID, self.encoding))
+        for k, v in self.extra:
+            if "\n" in k or "\n" in v:
+                raise AssertionError("newline in extra data: %r -> %r" % (k, v))
+            f.write("%s %s\n" % (k, v))
+        f.write("\n") # There must be a new line after the headers
+        f.write(self._message)
+        self._text = f.getvalue()
+        self._needs_serialization = False
+
+    tree = serializable_property("tree", "Tree that is the state of this commit")
+
+    def _get_parents(self):
         """Return a list of parents of this commit."""
+        self._ensure_parsed()
         return self._parents
 
-    @property
-    def author(self):
-        """Returns the name of the author of the commit"""
-        return self._author
+    def _set_parents(self, value):
+        """Set a list of parents of this commit."""
+        self._ensure_parsed()
+        self._needs_serialization = True
+        self._parents = value
 
-    @property
-    def committer(self):
-        """Returns the name of the committer of the commit"""
-        return self._committer
+    parents = property(_get_parents, _set_parents)
 
-    @property
-    def message(self):
-        """Returns the commit message"""
-        return self._message
+    def _get_extra(self):
        """Return extra settings of this commit."""
+        self._ensure_parsed()
+        return self._extra
 
-    @property
-    def commit_time(self):
-        """Returns the timestamp of the commit.
-
-        Returns it as the number of seconds since the epoch.
-        """
-        return self._commit_time
+    extra = property(_get_extra)
 
-    @property
-    def commit_timezone(self):
-        """Returns the zone the commit time is in
-        """
-        return self._commit_timezone
+    author = serializable_property("author",
+        "The name of the author of the commit")
 
-    @property
-    def author_time(self):
-        """Returns the timestamp the commit was written.
-
-        Returns it as the number of seconds since the epoch.
-        """
-        return self._author_time
+    committer = serializable_property("committer",
+        "The name of the committer of the commit")
 
-    @property
-    def author_timezone(self):
-        """Returns the zone the author time is in
-        """
-        return self._author_timezone
+    message = serializable_property("message",
+        "The commit message")
+
+    commit_time = serializable_property("commit_time",
+        "The timestamp of the commit. As the number of seconds since the epoch.")
+
+    commit_timezone = serializable_property("commit_timezone",
+        "The zone the commit time is in")
+
+    author_time = serializable_property("author_time",
+        "The timestamp the commit was written. as the number of seconds since the epoch.")
+
+    author_timezone = serializable_property("author_timezone",
+        "Returns the zone the author time is in.")
+
+    encoding = serializable_property("encoding",
+        "Encoding of the commit message.")
 
 
 type_map = {
@@ -585,7 +709,6 @@ num_type_map = {
 
 try:
     # Try to import C versions
-    from dulwich._objects import hex_to_sha, sha_to_hex, parse_tree
+    from dulwich._objects import parse_tree, sorted_tree_items
 except ImportError:
     pass
-
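Usage sketch (not part of the patch above): a minimal illustration of the API this change introduces, assuming the post-patch dulwich.objects module from this commit is importable under Python 2; values are only for illustration.

    from dulwich.objects import Blob, parse_timezone, format_timezone

    # from_string()/data now go through set_raw_string() and lazy serialization
    blob = Blob.from_string("hello\n")
    assert blob.data == "hello\n"
    assert len(blob.id) == 40        # hex SHA-1 of "blob 6\0hello\n"

    # parse_timezone()/format_timezone() convert "+HHMM" offsets to seconds and back
    assert parse_timezone("+0200") == 7200
    assert parse_timezone("-0130") == -5400
    assert format_timezone(7200) == "+0200"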