# pack.py -- For dealing with packed git objects.
# Copyright (C) 2007 James Westby <jw+debian@jameswestby.net>
# Copyright (C) 2008 Jelmer Vernooij <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your option) a later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA  02110-1301, USA.
20 """Classes for dealing with packed git objects.
22 A pack is a compact representation of a bunch of objects, stored
23 using deltas where possible.
25 They have two parts, the pack file, which stores the data, and an index
26 that tells you where the data is.
28 To find an object you look in all of the index files 'til you find a
29 match for the object name. You then use the pointer got from this as
30 a pointer in to the corresponding packfile.
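# A minimal usage sketch (the pack basename below is hypothetical; any
# pack-<sha>.pack/.idx pair on disk would do):
#
#   p = Pack("pack-deadbeef")
#   "0" * 40 in p            # membership test via the index
#   obj = p["0" * 40]        # returns a ShaFile, deltas resolved
#   for hexsha in p:         # iterate hex shas from the index
#       print hexsha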
try:
    from collections import defaultdict
except ImportError:
    from misc import defaultdict

import difflib
from itertools import imap, izip
import mmap
import os
import struct
try:
    from struct import unpack_from
except ImportError:
    from dulwich.misc import unpack_from
import sys
import zlib

from dulwich.errors import (
    ApplyDeltaError,
    ChecksumMismatch,
    )
from dulwich.lru_cache import (
    LRUSizeCache,
    )
from dulwich.objects import (
    ShaFile,
    hex_to_sha,
    sha_to_hex,
    )
from dulwich.misc import make_sha

supports_mmap_offset = (sys.version_info[0] >= 3 or
        (sys.version_info[0] == 2 and sys.version_info[1] >= 6))

def take_msb_bytes(map, offset):
    """Read bytes from map starting at offset, stopping after the first
    byte whose most-significant bit is clear."""
    ret = []
    while len(ret) == 0 or ret[-1] & 0x80:
        ret.append(ord(map[offset]))
        offset += 1
    return ret

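# For example, "\x95\x0b" yields [0x95, 0x0b]: 0x95 has its MSB set, so one
# more byte is read; 0x0b does not, so reading stops there.
#
#   take_msb_bytes("\x95\x0b", 0) == [0x95, 0x0b]
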
def read_zlib(data, offset, dec_size):
    """Decompress a zlib stream found at offset in data.

    :return: Tuple with the decompressed text and the number of compressed
        bytes consumed.
    """
    obj = zlib.decompressobj()
    x = ""
    fed = 0
    while obj.unused_data == "":
        base = offset+fed
        add = data[base:base+1024]
        if len(add) < 1024:
            # Pad so the decompressor reports unused data once the
            # stream ends at the end of the buffer.
            add += "Z"
        fed += len(add)
        x += obj.decompress(add)
    assert len(x) == dec_size
    comp_len = fed-len(obj.unused_data)
    return x, comp_len


def iter_sha1(iter):
    """Return the hexdigest of the SHA1 over a set of names."""
    sha1 = make_sha()
    for name in iter:
        sha1.update(name)
    return sha1.hexdigest()

MAX_MMAP_SIZE = 1024 * 1024 * 1024

def simple_mmap(f, offset, size, access=mmap.ACCESS_READ):
    """Simple wrapper for mmap() which always supports the offset parameter.

    :param f: File object.
    :param offset: Offset in the file, from the beginning of the file.
    :param size: Size of the mmap'ed area
    :param access: Access mechanism.
    :return: Tuple with the mmap'ed area and the offset into that area at
        which the requested data starts.
    """
    if offset+size > MAX_MMAP_SIZE and not supports_mmap_offset:
        raise AssertionError("%d bytes is larger than %d bytes, and this "
            "version of Python does not support the offset argument to "
            "mmap()." % (offset+size, MAX_MMAP_SIZE))
    if supports_mmap_offset:
        return mmap.mmap(f.fileno(), size, access=access, offset=offset), 0
    else:
        mem = mmap.mmap(f.fileno(), size+offset, access=access)
        return mem, offset

class PackIndex(object):
    """An index in to a packfile.

    Given a sha id of an object a pack index can tell you the location in the
    packfile of that object if it has it.

    To do the lookup it opens the file, and indexes the first 256 4-byte
    groups with the first byte of the sha id. The value in the four byte group
    indexed is the end of the group that shares the same starting byte.
    Subtract one from the starting byte and index again to find the start of
    the group. The values are sorted by sha id within the group, so do the
    math to find the start and end offset and then bisect to find if the value
    is present.
    """
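    # Worked example (values are illustrative, not from a real index): for a
    # sha whose first byte is 0xab, the candidate entries lie in the range
    # [fan_out_table[0xaa], fan_out_table[0xab]).  If fan_out_table[0xaa] is
    # 17 and fan_out_table[0xab] is 21, entries 17..20 all start with 0xab
    # and are sorted, so a binary search over those four entries finds it.
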
    def __init__(self, filename):
        """Create a pack index object.

        Provide it with the name of the index file to consider, and it will
        map it whenever required.
        """
        self._filename = filename
        # Take the size now, so it can be checked each time we map the file to
        # ensure that it hasn't changed.
        self._size = os.path.getsize(filename)
        self._file = open(filename, 'r')
        self._contents, map_offset = simple_mmap(self._file, 0, self._size)
        assert map_offset == 0
        if self._contents[:4] != '\377tOc':
            self.version = 1
            self._fan_out_table = self._read_fan_out_table(0)
        else:
            (self.version, ) = unpack_from(">L", self._contents, 4)
            assert self.version in (2,), "Version was %d" % self.version
            self._fan_out_table = self._read_fan_out_table(8)
            self._name_table_offset = 8 + 0x100 * 4
            self._crc32_table_offset = self._name_table_offset + 20 * len(self)
            self._pack_offset_table_offset = self._crc32_table_offset + 4 * len(self)

    def __eq__(self, other):
        if type(self) != type(other):
            return False

        if self._fan_out_table != other._fan_out_table:
            return False

        for (name1, _, _), (name2, _, _) in izip(self.iterentries(), other.iterentries()):
            if name1 != name2:
                return False
        return True

174 """Return the number of entries in this pack index."""
175 return self._fan_out_table[-1]
    def _unpack_entry(self, i):
        """Unpack the i-th entry in the index file.

        :return: Tuple with object name (SHA), offset in pack file and
            CRC32 checksum (if known).
        """
        if self.version == 1:
            (offset, name) = unpack_from(">L20s", self._contents,
                (0x100 * 4) + (i * 24))
            return (name, offset, None)
        else:
            return (self._unpack_name(i), self._unpack_offset(i),
                    self._unpack_crc32_checksum(i))

    def _unpack_name(self, i):
        if self.version == 1:
            offset = (0x100 * 4) + (i * 24) + 4
        else:
            offset = self._name_table_offset + i * 20
        return self._contents[offset:offset+20]

    def _unpack_offset(self, i):
        if self.version == 1:
            offset = (0x100 * 4) + (i * 24)
        else:
            offset = self._pack_offset_table_offset + i * 4
        return unpack_from(">L", self._contents, offset)[0]

    def _unpack_crc32_checksum(self, i):
        if self.version == 1:
            # Version 1 indexes don't store CRC32 checksums.
            return None
        else:
            return unpack_from(">L", self._contents,
                self._crc32_table_offset + i * 4)[0]

    def __iter__(self):
        return imap(sha_to_hex, self._itersha())

    def _itersha(self):
        for i in range(len(self)):
            yield self._unpack_name(i)

    def objects_sha1(self):
        return iter_sha1(self._itersha())

    def iterentries(self):
        """Iterate over the entries in this pack index.

        Will yield tuples with object name, offset in packfile and crc32
        checksum.
        """
        for i in range(len(self)):
            yield self._unpack_entry(i)

    def _read_fan_out_table(self, start_offset):
        ret = []
        for i in range(0x100):
            ret.append(struct.unpack(">L",
                self._contents[start_offset+i*4:start_offset+(i+1)*4])[0])
        return ret

236 """Check that the stored checksum matches the actual checksum."""
237 return self.calculate_checksum() == self.get_stored_checksum()
    def calculate_checksum(self):
        """Calculate the SHA1 checksum over the contents of this index."""
        return make_sha(self._contents[:-20]).digest()

    def get_pack_checksum(self):
        """Return the SHA1 checksum stored for the corresponding packfile."""
        return str(self._contents[-40:-20])

    def get_stored_checksum(self):
        """Return the SHA1 checksum stored for this index."""
        return str(self._contents[-20:])

    def object_index(self, sha):
        """Return the offset into the corresponding packfile for the object.

        Given the name of an object it will return the offset that object
        lives at within the corresponding pack file. If the pack file doesn't
        have the object then None will be returned.
        """
        if len(sha) == 40:
            sha = hex_to_sha(sha)
        return self._object_index(sha)

    def _object_index(self, sha):
        """See object_index.

        :param sha: A *binary* SHA string. (20 bytes long)
        """
        assert len(sha) == 20
        idx = ord(sha[0])
        if idx == 0:
            start = 0
        else:
            start = self._fan_out_table[idx-1]
        end = self._fan_out_table[idx]
        # Binary search within the group of entries that share a first byte.
        while start < end:
            i = (start + end) / 2
            file_sha = self._unpack_name(i)
            if file_sha < sha:
                start = i + 1
            elif file_sha > sha:
                end = i
            else:
                return self._unpack_offset(i)
        return None

def read_pack_header(f):
    header = f.read(12)
    assert header[:4] == "PACK"
    (version,) = unpack_from(">L", header, 4)
    assert version in (2, 3), "Version was %d" % version
    (num_objects,) = unpack_from(">L", header, 8)
    return (version, num_objects)

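# For instance, a version-2 pack containing three objects starts with the
# twelve bytes "PACK\x00\x00\x00\x02\x00\x00\x00\x03", for which this
# function returns (2, 3).
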
def read_pack_tail(f):
    return (f.read(20),)

def unpack_object(map, offset=0):
    bytes = take_msb_bytes(map, offset)
    type = (bytes[0] >> 4) & 0x07
    size = bytes[0] & 0x0f
    for i, byte in enumerate(bytes[1:]):
        size += (byte & 0x7f) << ((i * 7) + 4)
    raw_base = len(bytes)
    if type == 6: # offset delta
        bytes = take_msb_bytes(map, raw_base + offset)
        assert not (bytes[-1] & 0x80)
        delta_base_offset = bytes[0] & 0x7f
        for byte in bytes[1:]:
            delta_base_offset += 1
            delta_base_offset <<= 7
            delta_base_offset += (byte & 0x7f)
        raw_base += len(bytes)
        uncomp, comp_len = read_zlib(map, offset + raw_base, size)
        assert size == len(uncomp)
        return type, (delta_base_offset, uncomp), comp_len+raw_base
    elif type == 7: # ref delta
        basename = map[offset+raw_base:offset+raw_base+20]
        uncomp, comp_len = read_zlib(map, offset+raw_base+20, size)
        assert size == len(uncomp)
        return type, (basename, uncomp), comp_len+raw_base+20
    else:
        uncomp, comp_len = read_zlib(map, offset+raw_base, size)
        assert len(uncomp) == size
        return type, uncomp, comp_len+raw_base

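# The base offset of an offset delta uses git's "add one per continuation
# byte" varint.  A worked example: the two bytes 0x81 0x10 decode as
#   delta_base_offset = 0x81 & 0x7f          ->   1
#   (continuation)      ((1 + 1) << 7) | 0x10 -> 272
# so the delta base lives 272 bytes before this object's own offset.
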
def compute_object_size((num, obj)):
    if num in (6, 7):
        # Deltas are stored as a tuple whose second element is the delta text.
        return len(obj[1])
    assert isinstance(obj, str)
    return len(obj)

class PackData(object):
    """The data contained in a packfile.

    Pack files can be accessed both sequentially for exploding a pack, and
    directly with the help of an index to retrieve a specific object.

    The objects within are either complete or a delta against another.

    The header is variable length. If the MSB of each byte is set then it
    indicates that the subsequent byte is still part of the header.
    For the first byte the next MS bits are the type, which tells you the type
    of object, and whether it is a delta. The LS 4 bits are the lowest bits of
    the size. For each subsequent byte the LS 7 bits are the next MS bits of
    the size, i.e. the last byte of the header contains the MS bits of the
    size.

    For the complete objects the data is stored as zlib deflated data.
    The size in the header is the uncompressed object size, so to uncompress
    you need to just keep feeding data to zlib until you get an object back,
    or it errors on bad data. This is done here by just giving the complete
    buffer from the start of the deflated object on. This is bad, but until I
    get mmap sorted out it will have to do.

    Currently there are no integrity checks done. Also no attempt is made to
    try and detect the delta case, or a request for an object at the wrong
    position. It will all just throw a zlib or KeyError.
    """
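    # A worked example of the header encoding described above: the bytes
    # 0x95 0x0b are 1001.0101 and 0000.1011.  The first byte's MSB is set
    # (more header follows), its type bits are 001 (commit) and its low four
    # bits give size bits 0101.  The second byte contributes 0001011 as the
    # next more-significant size bits, so size = 0b10110101 = 181 bytes.
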
    def __init__(self, filename):
        """Create a PackData object that represents the pack in the given
        filename.

        The file must exist and stay readable until the object is disposed of.
        It must also stay the same size. It will be mapped whenever needed.

        Currently there is a restriction on the size of the pack as the python
        mmap implementation is flawed.
        """
        self._filename = filename
        assert os.path.exists(filename), "%s is not a packfile" % filename
        self._size = os.path.getsize(filename)
        self._header_size = 12
        assert self._size >= self._header_size, \
            "%s is too small for a packfile (%d < %d)" % (
                filename, self._size, self._header_size)
        self._read_header()
        self._offset_cache = LRUSizeCache(1024*1024*100,
            compute_size=compute_object_size)

    def _read_header(self):
        f = open(self._filename, 'rb')
        try:
            (version, self._num_objects) = \
                    read_pack_header(f)
            f.seek(self._size-20)
            (self._stored_checksum,) = read_pack_tail(f)
        finally:
            f.close()

396 """Returns the number of objects in this pack."""
397 return self._num_objects
    def calculate_checksum(self):
        """Calculate the checksum for this pack."""
        f = open(self._filename, 'rb')
        try:
            map, map_offset = simple_mmap(f, 0, self._size - 20)
            return make_sha(map[map_offset:self._size-20]).digest()
        finally:
            f.close()

    def resolve_object(self, offset, type, obj, get_ref, get_offset=None):
        """Resolve an object, possibly resolving deltas when necessary.

        :return: Tuple with object type and contents.
        """
        if type not in (6, 7): # Not a delta
            return type, obj

        if get_offset is None:
            get_offset = self.get_object_at

        if type == 6: # offset delta
            (delta_offset, delta) = obj
            assert isinstance(delta_offset, int)
            assert isinstance(delta, str)
            base_offset = offset-delta_offset
            type, base_obj = get_offset(base_offset)
            assert isinstance(type, int)
        elif type == 7: # ref delta
            (basename, delta) = obj
            assert isinstance(basename, str) and len(basename) == 20
            assert isinstance(delta, str)
            type, base_obj = get_ref(basename)
            assert isinstance(type, int)
            # Can't be an ofs delta, as we wouldn't know the base offset
            base_offset = None
        type, base_text = self.resolve_object(base_offset, type, base_obj, get_ref)
        if base_offset is not None:
            self._offset_cache[base_offset] = type, base_text
        ret = (type, apply_delta(base_text, delta))
        return ret

    def iterobjects(self):
        offset = self._header_size
        f = open(self._filename, 'rb')
        try:
            map, _ = simple_mmap(f, 0, self._size)
            for i in range(len(self)):
                (type, obj, total_size) = unpack_object(map, offset)
                crc32 = zlib.crc32(map[offset:offset+total_size]) & 0xffffffff
                yield offset, type, obj, crc32
                offset += total_size
        finally:
            f.close()

    def iterentries(self, ext_resolve_ref=None):
        found = {}
        postponed = defaultdict(list)
        class Postpone(Exception):
            """Raised to postpone delta resolving."""

        def get_ref_text(sha):
            if sha in found:
                return found[sha]
            if ext_resolve_ref:
                try:
                    return ext_resolve_ref(sha)
                except KeyError:
                    pass
            raise Postpone, (sha, )
        todo = list(self.iterobjects())
        while todo:
            (offset, type, obj, crc32) = todo.pop(0)
            assert isinstance(offset, int)
            assert isinstance(type, int)
            assert isinstance(obj, tuple) or isinstance(obj, str)
            try:
                type, obj = self.resolve_object(offset, type, obj, get_ref_text)
            except Postpone, (sha, ):
                postponed[sha].append((offset, type, obj))
            else:
                shafile = ShaFile.from_raw_string(type, obj)
                sha = shafile.sha().digest()
                found[sha] = (type, obj)
                yield sha, offset, crc32
                todo += postponed.get(sha, [])
        if postponed:
            raise KeyError([sha_to_hex(h) for h in postponed.keys()])

    def sorted_entries(self, resolve_ext_ref=None):
        ret = list(self.iterentries(resolve_ext_ref))
        ret.sort()
        return ret

    def create_index_v1(self, filename, resolve_ext_ref=None):
        entries = self.sorted_entries(resolve_ext_ref)
        write_pack_index_v1(filename, entries, self.calculate_checksum())

    def create_index_v2(self, filename, resolve_ext_ref=None):
        entries = self.sorted_entries(resolve_ext_ref)
        write_pack_index_v2(filename, entries, self.calculate_checksum())

    def get_stored_checksum(self):
        return self._stored_checksum

    def check(self):
        return (self.calculate_checksum() == self.get_stored_checksum())

    def get_object_at(self, offset):
        """Given an offset in to the packfile return the object that is there.

        Using the associated index the location of an object can be looked up,
        and then the packfile can be asked directly for that object using this
        function.
        """
        if offset in self._offset_cache:
            return self._offset_cache[offset]
        assert isinstance(offset, long) or isinstance(offset, int),\
                "offset was %r" % offset
        assert offset >= self._header_size
        f = open(self._filename, 'rb')
        try:
            map, map_offset = simple_mmap(f, offset, self._size-offset)
            return unpack_object(map, map_offset)[:2]
        finally:
            f.close()

class SHA1Writer(object):
    """File-like wrapper that hashes everything written through it."""

    def __init__(self, f):
        self.f = f
        self.sha1 = make_sha("")

    def write(self, data):
        self.sha1.update(data)
        self.f.write(data)

    def write_sha(self):
        sha = self.sha1.digest()
        assert len(sha) == 20
        self.f.write(sha)
        return sha

    def close(self):
        sha = self.write_sha()
        self.f.close()
        return sha

    def tell(self):
        return self.f.tell()

def write_pack_object(f, type, object):
    """Write pack object to a file.

    :param f: File to write to
    :param object: Object to write
    :return: Tuple with offset at which the object was written, and crc32
        checksum of the object written.
    """
    offset = f.tell()
    packed_data_hdr = ""
    if type == 6: # offset delta
        (delta_base_offset, object) = object
    elif type == 7: # ref delta
        (basename, object) = object
    size = len(object)
    c = (type << 4) | (size & 15)
    size >>= 4
    while size:
        packed_data_hdr += (chr(c | 0x80))
        c = size & 0x7f
        size >>= 7
    packed_data_hdr += chr(c)
    if type == 6: # offset delta
        ret = [delta_base_offset & 0x7f]
        delta_base_offset >>= 7
        while delta_base_offset:
            delta_base_offset -= 1
            ret.insert(0, 0x80 | (delta_base_offset & 0x7f))
            delta_base_offset >>= 7
        packed_data_hdr += "".join([chr(x) for x in ret])
    elif type == 7: # ref delta
        assert len(basename) == 20
        packed_data_hdr += basename
    packed_data = packed_data_hdr + zlib.compress(object)
    f.write(packed_data)
    return (offset, (zlib.crc32(packed_data) & 0xffffffff))

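# Sanity check for the size header above (values picked for illustration):
# a blob (type 3) of 181 bytes gives c = (3 << 4) | (181 & 15) = 0x35, then
# size >>= 4 leaves 11, so the header comes out as 0xb5 0x0b -- exactly what
# unpack_object() decodes back to type 3, size 181.
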
def write_pack(filename, objects, num_objects):
    f = open(filename + ".pack", 'w')
    try:
        entries, data_sum = write_pack_data(f, objects, num_objects)
    finally:
        f.close()
    entries.sort()
    write_pack_index_v2(filename + ".idx", entries, data_sum)

def write_pack_data(f, objects, num_objects, window=10):
    """Write a new pack file.

    :param f: File to write the pack data to.
    :param objects: List of objects to write (tuples with object and path)
    :return: List with (name, offset, crc32 checksum) entries, pack checksum
    """
    recency = list(objects)
    # FIXME: Somehow limit delta depth
    # FIXME: Make thin-pack optional (it's not used when cloning a pack)
    # Build a list of objects ordered by the magic Linus heuristic
    # This helps us find good objects to diff against us
    magic = []
    for obj, path in recency:
        magic.append( (obj.type, path, 1, -len(obj.as_raw_string()[1]), obj) )
    magic.sort()
    # Build a map of objects and their index in magic - so we can find
    # preceding objects to diff against
    offs = {}
    for i in range(len(magic)):
        offs[magic[i][4]] = i
    # Write the pack
    entries = []
    f = SHA1Writer(f)
    f.write("PACK")               # Pack header
    f.write(struct.pack(">L", 2)) # Pack version
    f.write(struct.pack(">L", num_objects)) # Number of objects in pack
    for o, path in recency:
        sha1 = o.sha().digest()
        orig_t, raw = o.as_raw_string()
        winner = raw
        t = orig_t
        # The delta search is disabled for now:
        #for i in range(offs[o]-window, window):
        #    if i < 0 or i >= len(offs): continue
        #    b = magic[i][4]
        #    if b.type != orig_t: continue
        #    _, base = b.as_raw_string()
        #    delta = create_delta(base, raw)
        #    if len(delta) < len(winner):
        #        winner = delta
        #        t = 6 if magic[i][2] == 1 else 7
        offset, crc32 = write_pack_object(f, t, winner)
        entries.append((sha1, offset, crc32))
    return entries, f.write_sha()

def write_pack_index_v1(filename, entries, pack_checksum):
    """Write a new pack index file.

    :param filename: The filename of the new pack index file.
    :param entries: List of tuples with object name (sha), offset_in_pack, and
        crc32_checksum.
    :param pack_checksum: Checksum of the pack file.
    """
    f = open(filename, 'w')
    f = SHA1Writer(f)
    fan_out_table = defaultdict(lambda: 0)
    for (name, offset, entry_checksum) in entries:
        fan_out_table[ord(name[0])] += 1
    # Fan-out table
    for i in range(0x100):
        f.write(struct.pack(">L", fan_out_table[i]))
        fan_out_table[i+1] += fan_out_table[i]
    for (name, offset, entry_checksum) in entries:
        f.write(struct.pack(">L20s", offset, name))
    assert len(pack_checksum) == 20
    f.write(pack_checksum)
    f.close()

def create_delta(base_buf, target_buf):
    """Use python difflib to work out how to transform base_buf to target_buf"""
    assert isinstance(base_buf, str)
    assert isinstance(target_buf, str)
    out_buf = ""
    # Write delta header: the sizes of the base and target buffers.
    def encode_size(size):
        ret = ""
        c = size & 0x7f
        size >>= 7
        while size:
            ret += chr(c | 0x80)
            c = size & 0x7f
            size >>= 7
        ret += chr(c)
        return ret
    out_buf += encode_size(len(base_buf))
    out_buf += encode_size(len(target_buf))
    # write out delta opcodes
    seq = difflib.SequenceMatcher(a=base_buf, b=target_buf)
    for opcode, i1, i2, j1, j2 in seq.get_opcodes():
        # Git patch opcodes don't care about deletes!
        #if opcode == "replace" or opcode == "delete":
        #    pass
        if opcode == "equal":
            # If they are equal, unpacker will use data from base_buf
            # Write out an opcode that says what range to use
            scratch = ""
            op = 0x80
            o = i1
            for i in range(4):
                if o & 0xff << i*8:
                    scratch += chr((o >> (i*8)) & 0xff)
                    op |= 1 << i
            s = i2 - i1
            for i in range(2):
                if s & 0xff << i*8:
                    scratch += chr((s >> (i*8)) & 0xff)
                    op |= 1 << (4+i)
            out_buf += chr(op)
            out_buf += scratch
        if opcode == "replace" or opcode == "insert":
            # If we are replacing a range or adding one, then we just
            # output it to the stream (prefixed by its size)
            s = j2 - j1
            o = j1
            while s > 127:
                out_buf += chr(127)
                out_buf += target_buf[o:o+127]
                s -= 127
                o += 127
            out_buf += chr(s)
            out_buf += target_buf[o:o+s]
    return out_buf

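# A quick round-trip check (the strings are arbitrary examples):
#
#   base = "the quick brown fox jumped over the lazy dog"
#   target = "the quick brown fox jumps over the crazy dog"
#   assert apply_delta(base, create_delta(base, target)) == target
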
def apply_delta(src_buf, delta):
    """Based on the similar function in git's patch-delta.c.

    :param src_buf: Source buffer
    :param delta: Delta instructions
    """
    assert isinstance(src_buf, str), "was %r" % (src_buf,)
    assert isinstance(delta, str)
    out = []
    index = 0
    delta_length = len(delta)
    def get_delta_header_size(delta, index):
        size = 0
        i = 0
        while True:
            cmd = ord(delta[index])
            index += 1
            size |= (cmd & ~0x80) << i
            i += 7
            if not cmd & 0x80:
                break
        return size, index
    src_size, index = get_delta_header_size(delta, index)
    dest_size, index = get_delta_header_size(delta, index)
    assert src_size == len(src_buf), "%d vs %d" % (src_size, len(src_buf))
    while index < delta_length:
        cmd = ord(delta[index])
        index += 1
        if cmd & 0x80:
            # Copy a range from the source buffer; which offset and size
            # bytes follow is encoded in the low bits of cmd.
            cp_off = 0
            for i in range(4):
                if cmd & (1 << i):
                    x = ord(delta[index])
                    index += 1
                    cp_off |= x << (i * 8)
            cp_size = 0
            for i in range(3):
                if cmd & (1 << (4+i)):
                    x = ord(delta[index])
                    index += 1
                    cp_size |= x << (i * 8)
            if cp_size == 0:
                cp_size = 0x10000
            if (cp_off + cp_size < cp_size or
                cp_off + cp_size > src_size or
                cp_size > dest_size):
                break
            out.append(src_buf[cp_off:cp_off+cp_size])
        elif cmd != 0:
            # Insert the next cmd bytes literally from the delta stream.
            out.append(delta[index:index+cmd])
            index += cmd
        else:
            raise ApplyDeltaError("Invalid opcode 0")

    if index != delta_length:
        raise ApplyDeltaError("delta not empty: %r" % delta[index:])

    out = "".join(out)
    if dest_size != len(out):
        raise ApplyDeltaError("dest size incorrect")

    return out

def write_pack_index_v2(filename, entries, pack_checksum):
    """Write a new pack index file.

    :param filename: The filename of the new pack index file.
    :param entries: List of tuples with object name (sha), offset_in_pack, and
        crc32_checksum.
    :param pack_checksum: Checksum of the pack file.
    """
    f = open(filename, 'w')
    f = SHA1Writer(f)
    f.write('\377tOc') # Magic!
    f.write(struct.pack(">L", 2))
    fan_out_table = defaultdict(lambda: 0)
    for (name, offset, entry_checksum) in entries:
        fan_out_table[ord(name[0])] += 1
    # Fan-out table
    for i in range(0x100):
        f.write(struct.pack(">L", fan_out_table[i]))
        fan_out_table[i+1] += fan_out_table[i]
    for (name, offset, entry_checksum) in entries:
        f.write(name)
    for (name, offset, entry_checksum) in entries:
        f.write(struct.pack(">L", entry_checksum))
    for (name, offset, entry_checksum) in entries:
        # FIXME: handle if MSBit is set in offset
        f.write(struct.pack(">L", offset))
    # FIXME: handle table for pack files > 8 Gb
    assert len(pack_checksum) == 20
    f.write(pack_checksum)
    f.close()

class Pack(object):

    def __init__(self, basename):
        self._basename = basename
        self._data_path = self._basename + ".pack"
        self._idx_path = self._basename + ".idx"
        self._data = None
        self._idx = None

    def name(self):
        """The SHA over the SHAs of the objects in this pack."""
        return self.idx.objects_sha1()

    @property
    def data(self):
        if self._data is None:
            self._data = PackData(self._data_path)
            assert len(self.idx) == len(self._data)
            idx_stored_checksum = self.idx.get_pack_checksum()
            data_stored_checksum = self._data.get_stored_checksum()
            if idx_stored_checksum != data_stored_checksum:
                raise ChecksumMismatch(sha_to_hex(idx_stored_checksum),
                                       sha_to_hex(data_stored_checksum))
        return self._data

    @property
    def idx(self):
        if self._idx is None:
            self._idx = PackIndex(self._idx_path)
        return self._idx

    def close(self):
        if self._data is not None:
            self._data.close()
        self.idx.close()

    def __eq__(self, other):
        return type(self) == type(other) and self.idx == other.idx

    def __len__(self):
        """Number of entries in this pack."""
        return len(self.idx)

    def __repr__(self):
        return "Pack(%r)" % self._basename

    def __iter__(self):
        """Iterate over all the sha1s of the objects in this pack."""
        return iter(self.idx)

    def check(self):
        if not self.idx.check():
            return False
        if not self.data.check():
            return False
        return True

    def get_stored_checksum(self):
        return self.data.get_stored_checksum()

    def __contains__(self, sha1):
        """Check whether this pack contains a particular SHA1."""
        return (self.idx.object_index(sha1) is not None)

    def get_raw(self, sha1, resolve_ref=None):
        offset = self.idx.object_index(sha1)
        if offset is None:
            raise KeyError(sha1)

        type, obj = self.data.get_object_at(offset)
        if isinstance(offset, long):
            offset = int(offset)
        if resolve_ref is None:
            resolve_ref = self.get_raw
        assert isinstance(offset, int)
        return self.data.resolve_object(offset, type, obj, resolve_ref)

    def __getitem__(self, sha1):
        """Retrieve the specified SHA1."""
        type, uncomp = self.get_raw(sha1)
        return ShaFile.from_raw_string(type, uncomp)

    def iterobjects(self, get_raw=None):
        if get_raw is None:
            get_raw = self.get_raw
        for offset, type, obj, crc32 in self.data.iterobjects():
            assert isinstance(offset, int)
            yield ShaFile.from_raw_string(
                    *self.data.resolve_object(offset, type, obj, get_raw))

def load_packs(path):
    if not os.path.exists(path):
        return
    for name in os.listdir(path):
        if name.startswith("pack-") and name.endswith(".pack"):
            yield Pack(os.path.join(path, name[:-len(".pack")]))

# Use the C implementation of apply_delta when it is available.
try:
    from dulwich._pack import apply_delta
except ImportError:
    pass