1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
use super::*;
use crate::utils::binary::{parse_hex_string_to_u64, BareMetalConfig, MemorySegment};
use goblin::{elf, Object};

/// A representation of the runtime image of a binary after being loaded into memory by the loader.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Clone)]
pub struct RuntimeMemoryImage {
    /// Sequence of memory segments.
    pub memory_segments: Vec<MemorySegment>,
    /// Endianness
    pub is_little_endian: bool,
    /// True iff we are analyzing a Linux loadable kernel module.
    pub is_lkm: bool,
}

impl RuntimeMemoryImage {
    /// Generate a runtime memory image containing no memory segments.
    /// Primarily useful in situations where any access to global memory would be an error.
    pub fn empty(is_little_endian: bool) -> RuntimeMemoryImage {
        RuntimeMemoryImage {
            memory_segments: Vec::new(),
            is_little_endian,
            is_lkm: false,
        }
    }

    /// Generate a runtime memory image for a given binary.
    ///
    /// The function can parse ELF and PE files as input.
    pub fn new(binary: &[u8]) -> Result<Self, Error> {
        match Object::parse(binary)? {
            Object::Elf(elf_file) => match elf_file.header.e_type {
                elf::header::ET_REL => Self::from_elf_sections(binary, elf_file),
                elf::header::ET_DYN | elf::header::ET_EXEC => {
                    Self::from_elf_segments(binary, elf_file)
                }
                ty => Err(anyhow!("Unsupported ELF type: e_type {}", ty)),
            },
            Object::PE(pe_file) => {
                let mut memory_segments = Vec::new();
                for header in pe_file.sections.iter() {
                    if (header.characteristics & 0x02000000) == 0 {
                        // Only load segments which are not discardable
                        memory_segments.push(MemorySegment::from_pe_section(binary, header));
                    }
                }
                if memory_segments.is_empty() {
                    return Err(anyhow!("No loadable segments found"));
                }
                let mut memory_image = RuntimeMemoryImage {
                    memory_segments,
                    is_little_endian: true,
                    is_lkm: false,
                };
                memory_image.add_global_memory_offset(pe_file.image_base as u64);
                Ok(memory_image)
            }
            _ => Err(anyhow!("Object type not supported.")),
        }
    }

    /// Generate a runtime memory image for an executable ELF file or shared object.
    fn from_elf_segments(binary: &[u8], elf_file: elf::Elf) -> Result<Self, Error> {
        let mut memory_segments = Vec::new();

        for header in elf_file.program_headers.iter() {
            if header.p_type == elf::program_header::PT_LOAD {
                memory_segments.push(MemorySegment::from_elf_segment(binary, header));
            }
        }

        if memory_segments.is_empty() {
            return Err(anyhow!("No loadable segments found"));
        }

        Ok(Self {
            memory_segments,
            is_little_endian: elf_file.header.endianness().unwrap().is_little(),
            is_lkm: false,
        })
    }

    /// Generate a runtime memory image for a relocatable object file.
    ///
    /// These files do not contain information about the expected memory layout.
    /// Ghidra implements a basic loader that essentially concatenates all
    /// `SHF_ALLOC` sections that are not `SHT_NULL`. They are placed in memory
    /// as close as possible while respecting their alignment, starting at a
    /// fixed address. We start mapping at zero and shift by the actual base
    /// address that Ghidra has chosen after running our plugin.
    ///
    /// NOTE: It is important that this implementation stays in sync with what
    /// `processSectionHeaders` in [`ElfProgramBuilder`] does in the cases that
    /// we care about.
    ///
    /// [`ElfProgramBuilder`]: https://github.com/NationalSecurityAgency/ghidra/blob/master/Ghidra/Features/Base/src/main/java/ghidra/app/util/opinion/ElfProgramBuilder.java
    fn from_elf_sections(binary: &[u8], elf_file: elf::Elf) -> Result<Self, Error> {
        let mut next_base = 0;

        Ok(Self {
            memory_segments: elf_file
                .section_headers
                .iter()
                .filter_map(|section_header| {
                    if is_loaded(section_header) {
                        let mem_seg =
                            MemorySegment::from_elf_section(binary, next_base, section_header);
                        next_base = mem_seg.base_address + mem_seg.bytes.len() as u64;
                        Some(mem_seg)
                    } else {
                        None
                    }
                })
                .collect(),
            is_little_endian: elf_file.header.endianness().unwrap().is_little(),
            is_lkm: get_section(".modinfo", &elf_file).is_some()
                && get_section(".gnu.linkonce.this_module", &elf_file).is_some(),
        })
    }

    /// Generate a runtime memory image for a bare metal binary.
    ///
    /// The generated runtime memory image contains:
    /// * one memory region corresponding to non-volatile memory
    /// * one memory region corresponding to volatile memory (RAM)
    ///
    /// See [`BareMetalConfig`] for more information about the assumed memory layout for bare metal binaries.
    pub fn new_from_bare_metal(
        binary: &[u8],
        bare_metal_config: &BareMetalConfig,
    ) -> Result<Self, Error> {
        let processor_id_parts: Vec<&str> = bare_metal_config.processor_id.split(':').collect();
        if processor_id_parts.len() < 3 {
            return Err(anyhow!("Could not parse processor ID."));
        }
        let is_little_endian = match processor_id_parts[1] {
            "LE" => true,
            "BE" => false,
            _ => return Err(anyhow!("Could not parse endianness of the processor ID.")),
        };
        let flash_base_address = parse_hex_string_to_u64(&bare_metal_config.flash_base_address)?;
        let ram_base_address = parse_hex_string_to_u64(&bare_metal_config.ram_base_address)?;
        let ram_size = parse_hex_string_to_u64(&bare_metal_config.ram_size)?;
        // Check that the whole binary is contained in addressable space.
        let address_bit_length = processor_id_parts[2].parse::<u64>()?;
        match flash_base_address.checked_add(binary.len() as u64) {
            Some(max_address) => {
                if (max_address >> address_bit_length) != 0 {
                    return Err(anyhow!("Binary too large for given base address"));
                }
            }
            None => return Err(anyhow!("Binary too large for given base address")),
        }

        Ok(RuntimeMemoryImage {
            memory_segments: vec![
                MemorySegment::from_bare_metal_file(binary, flash_base_address),
                MemorySegment::new_bare_metal_ram_segment(ram_base_address, ram_size),
            ],
            is_little_endian,
            is_lkm: false,
        })
    }

    /// Get the base address for the image of a binary when loaded into memory.
    pub fn get_base_address(binary: &[u8]) -> Result<u64, Error> {
        match Object::parse(binary)? {
            Object::Elf(elf_file) => match elf_file.header.e_type {
                elf::header::ET_REL => Ok(0),
                elf::header::ET_DYN | elf::header::ET_EXEC => {
                    elf_file
                        .program_headers
                        .iter()
                        .find_map(|header| {
                            let vm_range = header.vm_range();
                            if !vm_range.is_empty()
                                && header.p_type == goblin::elf::program_header::PT_LOAD
                            {
                                // The loadable segments have to occur in order in the program header table.
                                // So the start address of the first loadable segment is the base offset of the binary.
                                Some(vm_range.start as u64)
                            } else {
                                None
                            }
                        })
                        .context("No loadable segment bounds found.")
                }
                ty => Err(anyhow!("Unsupported ELF type: e_type {}", ty)),
            },
            _ => Err(anyhow!("Binary type not yet supported")),
        }
    }

    /// Return whether values in the memory image should be interpreted in little-endian
    /// or big-endian byte order.
    pub fn is_little_endian_byte_order(&self) -> bool {
        self.is_little_endian
    }

    /// Add a global offset to the base addresses of all memory segments.
    /// Useful to align the addresses with those reported by Ghidra
    /// if the Ghidra backend added such an offset to all addresses.
    pub fn add_global_memory_offset(&mut self, offset: u64) {
        for segment in self.memory_segments.iter_mut() {
            segment.base_address += offset;
        }
    }

    /// Read the contents of the memory image at the given address
    /// to emulate a read instruction to global data at runtime.
    ///
    /// The read method is endian-aware,
    /// i.e. values are interpreted with the endianness of the CPU architecture.
    /// If the address points to a writeable segment, the returned value is a `Ok(None)` value,
    /// since the data may change during program execution.
    ///
    /// Returns an error if the address is not contained in the global data address range.
    pub fn read(&self, address: &Bitvector, size: ByteSize) -> Result<Option<Bitvector>, Error> {
        let address = address.try_to_u64().unwrap();
        for segment in self.memory_segments.iter() {
            if address >= segment.base_address
                && u64::from(size) <= segment.base_address + segment.bytes.len() as u64
                && address <= segment.base_address + segment.bytes.len() as u64 - u64::from(size)
            {
                if segment.write_flag {
                    // The segment is writeable, thus we do not know the content at runtime.
                    return Ok(None);
                }
                let index = (address - segment.base_address) as usize;
                let mut bytes = segment.bytes[index..index + u64::from(size) as usize].to_vec();
                if self.is_little_endian {
                    bytes = bytes.into_iter().rev().collect();
                }
                let mut bytes = bytes.into_iter();
                let mut bitvector = Bitvector::from_u8(bytes.next().unwrap());
                for byte in bytes {
                    let new_byte = Bitvector::from_u8(byte);
                    bitvector = bitvector.bin_op(BinOpType::Piece, &new_byte)?;
                }
                return Ok(Some(bitvector));
            }
        }
        // No segment fully contains the read.
        Err(anyhow!("Address is not a valid global memory address."))
    }

    /// Read the contents of memory from a given address onwards until a null byte is reached and checks whether the
    /// content is a valid UTF8 string.
    pub fn read_string_until_null_terminator(&self, address: &Bitvector) -> Result<&str, Error> {
        let address = address.try_to_u64().unwrap();
        for segment in self.memory_segments.iter() {
            if address >= segment.base_address
                && address <= segment.base_address + segment.bytes.len() as u64
            {
                let start_index = (address - segment.base_address) as usize;
                if let Some(end_index) = segment.bytes[start_index..].iter().position(|&b| b == 0) {
                    let c_str = std::ffi::CStr::from_bytes_with_nul(
                        &segment.bytes[start_index..start_index + end_index + 1],
                    )?;
                    return Ok(c_str.to_str()?);
                } else {
                    return Err(anyhow!("Not a valid string in memory."));
                }
            }
        }

        Err(anyhow!("Address is not a valid global memory address."))
    }

    /// Checks whether the constant is a global memory address.
    pub fn is_global_memory_address(&self, constant: &Bitvector) -> bool {
        if self.read(constant, constant.bytesize()).is_ok() {
            return true;
        }
        false
    }

    /// Check whether all addresses in the given interval point to a readable segment in the runtime memory image.
    ///
    /// Returns an error if the address interval intersects more than one memory segment
    /// or if it does not point to global memory at all.
    pub fn is_interval_readable(
        &self,
        start_address: u64,
        end_address: u64,
    ) -> Result<bool, Error> {
        for segment in self.memory_segments.iter() {
            if start_address >= segment.base_address
                && start_address < segment.base_address + segment.bytes.len() as u64
            {
                if end_address <= segment.base_address + segment.bytes.len() as u64 {
                    return Ok(segment.read_flag);
                } else {
                    return Err(anyhow!("Interval spans more than one segment"));
                }
            }
        }
        Err(anyhow!("Address not contained in runtime memory image"))
    }

    /// For an address to global read-only memory, return the memory segment it points to
    /// and the index inside the segment, where the address points to.
    ///
    /// Returns an error if the target memory segment is marked as writeable
    /// or if the pointer does not point to global memory.
    pub fn get_ro_data_pointer_at_address(
        &self,
        address: &Bitvector,
    ) -> Result<(&[u8], usize), Error> {
        let address = address.try_to_u64().unwrap();
        for segment in self.memory_segments.iter() {
            if address >= segment.base_address
                && address < segment.base_address + segment.bytes.len() as u64
            {
                if segment.write_flag {
                    return Err(anyhow!("Target segment is writeable"));
                } else {
                    return Ok((&segment.bytes, (address - segment.base_address) as usize));
                }
            }
        }
        Err(anyhow!("Pointer target not in global memory."))
    }

    /// Check whether the given address points to a writeable segment in the runtime memory image.
    ///
    /// Returns an error if the address does not point to global memory.
    pub fn is_address_writeable(&self, address: &Bitvector) -> Result<bool, Error> {
        let address = address.try_to_u64().unwrap();
        for segment in self.memory_segments.iter() {
            if address >= segment.base_address
                && address < segment.base_address + segment.bytes.len() as u64
            {
                return Ok(segment.write_flag);
            }
        }
        Err(anyhow!("Address not contained in runtime memory image"))
    }

    /// Check whether all addresses in the given interval point to a writeable segment in the runtime memory image.
    ///
    /// Returns an error if the address interval intersects more than one memory segment
    /// or if it does not point to global memory at all.
    pub fn is_interval_writeable(
        &self,
        start_address: u64,
        end_address: u64,
    ) -> Result<bool, Error> {
        for segment in self.memory_segments.iter() {
            if start_address >= segment.base_address
                && start_address < segment.base_address + segment.bytes.len() as u64
            {
                if end_address <= segment.base_address + segment.bytes.len() as u64 {
                    return Ok(segment.write_flag);
                } else {
                    return Err(anyhow!("Interval spans more than one segment"));
                }
            }
        }
        Err(anyhow!("Address not contained in runtime memory image"))
    }
}

/// Returns the section header of the first section with this name.
fn get_section<'a>(name: &str, elf_file: &'a elf::Elf<'a>) -> Option<&'a elf::SectionHeader> {
    let sh_strtab = &elf_file.shdr_strtab;

    elf_file.section_headers.iter().find(|section_header| {
        matches!(sh_strtab.get_at(section_header.sh_name), Some(sh_name) if sh_name == name)
    })
}

/// Returns true iff the section header will be loaded into memory by Ghidra.
#[inline]
fn is_loaded(section_header: &elf::SectionHeader) -> bool {
    section_header.is_alloc()
        && section_header.sh_type != elf::section_header::SHT_NULL
        && section_header.sh_size != 0
}

#[cfg(test)]
mod tests {
    use crate::{bitvec, intermediate_representation::*};

    #[test]
    fn read_endianness() {
        let mut mem_image = RuntimeMemoryImage::mock();
        let address = bitvec!("0x1001:4");
        assert_eq!(
            mem_image.read(&address, ByteSize::new(4)).unwrap(),
            bitvec!("0xb4b3b2b1:4").into()
        );
        mem_image.is_little_endian = false;
        assert_eq!(
            mem_image.read(&address, ByteSize::new(4)).unwrap(),
            bitvec!("0xb1b2b3b4:4").into()
        );
    }

    #[test]
    fn ro_data_pointer() {
        let mem_image = RuntimeMemoryImage::mock();
        let address = bitvec!("0x1002:4");
        let (slice, index) = mem_image.get_ro_data_pointer_at_address(&address).unwrap();
        assert_eq!(index, 2);
        assert_eq!(&slice[index..], &[0xb2u8, 0xb3, 0xb4]);
    }

    #[test]
    fn test_read_string_until_null_terminator() {
        let mem_image = RuntimeMemoryImage::mock();
        // the byte array contains "Hello World".
        let expected_string: &str =
            std::str::from_utf8(b"\x48\x65\x6c\x6c\x6f\x20\x57\x6f\x72\x6c\x64").unwrap();
        let address = bitvec!("0x3002:4");
        assert_eq!(
            expected_string,
            mem_image
                .read_string_until_null_terminator(&address)
                .unwrap(),
        );
    }
}