comparison rust/hg-core/src/revlog/index.rs @ 51264:47a34afda7ad
rust-index: only access offsets if revlog is inline
Accessing the `RwLock` showed up in profiles even when there was no contention.
Offsets only exist for inline revlogs, so gate everything behind an inline check.
author:   Raphaël Gomès <rgomes@octobus.net>
date:     Thu, 14 Dec 2023 09:57:25 +0100
parents:  9088c6d65ef6
children: c4cbb515b006
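
For context, here is a minimal, self-contained sketch of the pattern this patch moves to: a cheap `is_inline()` boolean gates every access to the lazily populated, `RwLock`-protected offsets, so the separated-index path never touches the lock at all. The struct, the constant and the placeholder offset scan below are illustrative stand-ins, not the real definitions from rust/hg-core/src/revlog/index.rs.

use std::sync::{RwLock, RwLockReadGuard};

const INDEX_ENTRY_SIZE: usize = 64;

/// Minimal stand-in for the revlog index (illustrative only).
struct Index {
    bytes: Vec<u8>,
    inline: bool,
    /// Per-revision start offsets, lazily computed and only
    /// meaningful when the data is stored inline.
    offsets: RwLock<Option<Vec<usize>>>,
}

impl Index {
    fn is_inline(&self) -> bool {
        self.inline
    }

    /// Lazily compute and return the offsets. Callers check
    /// `is_inline()` first, so the separated path never takes the lock.
    fn get_offsets(&self) -> RwLockReadGuard<Option<Vec<usize>>> {
        assert!(self.is_inline());
        {
            // Write-lock only long enough to populate the cache once.
            let mut offsets = self.offsets.write().unwrap();
            if offsets.is_none() {
                // Placeholder for the real inline scan of `bytes`.
                let scanned = (0..self.bytes.len() / INDEX_ENTRY_SIZE)
                    .map(|i| i * INDEX_ENTRY_SIZE)
                    .collect();
                offsets.replace(scanned);
            }
        }
        self.offsets.read().unwrap()
    }

    fn len(&self) -> usize {
        if self.is_inline() {
            let offsets = self.get_offsets();
            offsets.as_ref().expect("inline should have offsets").len()
        } else {
            // Non-inline case: a plain length computation, no lock taken.
            self.bytes.len() / INDEX_ENTRY_SIZE
        }
    }
}

fn main() {
    let separated = Index {
        bytes: vec![0; 3 * INDEX_ENTRY_SIZE],
        inline: false,
        offsets: RwLock::new(None),
    };
    // Never acquires the RwLock.
    assert_eq!(separated.len(), 3);

    let inline = Index {
        bytes: vec![0; 2 * INDEX_ENTRY_SIZE],
        inline: true,
        offsets: RwLock::new(None),
    };
    // First access populates the offsets cache under the write lock.
    assert_eq!(inline.len(), 2);
}

In the real index the first inline access still pays for the write lock to fill the cache; the point of the change is that revlogs stored with a separate data file, which have no offsets at all, never go through the lock.
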
--- rust/hg-core/src/revlog/index.rs  51263:5b4995b40db0
+++ rust/hg-core/src/revlog/index.rs  51264:47a34afda7ad
@@ -401,34 +401,37 @@
         &self.bytes[start..end]
     }
 
     /// Return number of entries of the revlog index.
     pub fn len(&self) -> usize {
-        if let Some(offsets) = &*self.get_offsets() {
-            offsets.len()
+        if self.is_inline() {
+            (*self.get_offsets())
+                .as_ref()
+                .expect("inline should have offsets")
+                .len()
         } else {
             self.bytes.len() / INDEX_ENTRY_SIZE
         }
     }
 
     pub fn get_offsets(&self) -> RwLockReadGuard<Option<Vec<usize>>> {
-        if self.is_inline() {
+        assert!(self.is_inline());
         {
             // Wrap in a block to drop the read guard
             // TODO perf?
             let mut offsets = self.offsets.write().unwrap();
             if offsets.is_none() {
                 offsets.replace(inline_scan(&self.bytes.bytes).1);
-                }
             }
         }
         self.offsets.read().unwrap()
     }
 
     pub fn get_offsets_mut(&mut self) -> RwLockWriteGuard<Option<Vec<usize>>> {
+        assert!(self.is_inline());
         let mut offsets = self.offsets.write().unwrap();
-        if self.is_inline() && offsets.is_none() {
+        if offsets.is_none() {
             offsets.replace(inline_scan(&self.bytes.bytes).1);
         }
         offsets
     }
 
@@ -444,12 +447,12 @@
     /// if it was validated by this index.
     pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
         if rev == NULL_REVISION {
            return None;
         }
-        Some(if let Some(offsets) = &*self.get_offsets() {
-            self.get_entry_inline(rev, offsets.as_ref())
+        Some(if self.is_inline() {
+            self.get_entry_inline(rev)
         } else {
             self.get_entry_separated(rev)
         })
     }
 
@@ -500,15 +503,13 @@
             node_id: e.hash().as_bytes().try_into().unwrap(),
             ..Default::default()
         })
     }
 
-    fn get_entry_inline(
-        &self,
-        rev: Revision,
-        offsets: &[usize],
-    ) -> IndexEntry {
+    fn get_entry_inline(&self, rev: Revision) -> IndexEntry {
+        let offsets = &self.get_offsets();
+        let offsets = offsets.as_ref().expect("inline should have offsets");
         let start = offsets[rev.0 as usize];
         let end = start + INDEX_ENTRY_SIZE;
         let bytes = &self.bytes[start..end];
 
         // See IndexEntry for an explanation of this override.
@@ -701,13 +702,15 @@
     pub fn append(
         &mut self,
         revision_data: RevisionDataParams,
     ) -> Result<(), RevlogError> {
         revision_data.validate()?;
-        let new_offset = self.bytes.len();
-        if let Some(offsets) = &mut *self.get_offsets_mut() {
-            offsets.push(new_offset)
+        if self.is_inline() {
+            let new_offset = self.bytes.len();
+            if let Some(offsets) = &mut *self.get_offsets_mut() {
+                offsets.push(new_offset)
+            }
         }
         self.bytes.added.extend(revision_data.into_v1().as_bytes());
         self.clear_head_revs();
         Ok(())
     }
@@ -715,14 +718,20 @@
     pub fn pack_header(&self, header: i32) -> [u8; 4] {
         header.to_be_bytes()
     }
 
     pub fn remove(&mut self, rev: Revision) -> Result<(), RevlogError> {
-        let offsets = self.get_offsets().clone();
+        let offsets = if self.is_inline() {
+            self.get_offsets().clone()
+        } else {
+            None
+        };
         self.bytes.remove(rev, offsets.as_deref())?;
-        if let Some(offsets) = &mut *self.get_offsets_mut() {
-            offsets.truncate(rev.0 as usize)
+        if self.is_inline() {
+            if let Some(offsets) = &mut *self.get_offsets_mut() {
+                offsets.truncate(rev.0 as usize)
+            }
         }
         self.clear_head_revs();
         Ok(())
     }
 