/**
@file     transport.h
@brief    Message transport between kernel and userspace
@details  Copyright (c) 2017 Acronis International GmbH
@author   Mikhail Krivtsov ([email protected])
@since    $Id: $
*/

#pragma once

#include "compat.h"
#include "ring.h"
#include "safe_kobject.h"
#include "set.h"
#include <linux/atomic.h>	// atomic_t, atomic64_t
#include <linux/cache.h>	// ____cacheline_aligned_in_smp
#include <linux/fs.h>
#include <linux/mutex.h>	// struct mutex
#include <linux/rcupdate.h>	// rcu_read_lock, rcu_dereference
#include <linux/sched.h>	// struct task_struct
#include <linux/spinlock.h>	// spinlock_t
#include <linux/types.h>	// bool, [u]int(8|16|32|64)_t, pid_t
#include <linux/wait.h>		// wait_queue_head_t

#include "transport_id.h"
#include "transport_protocol.h"

// This is the header of the buffer that is 'mmap'd between kernel and user space.
// Both reader & writer must use READ_ONCE/WRITE_ONCE/smp_* when accessing its contents.
typedef struct {
	// 'head' and 'tail' form a ring: the reader consumes from 'head' and advances it
	// towards 'tail'; the writer advances 'tail' to publish that more content is available.
	// In our case, kernel=writer & userspace=reader.

	// 'head' is written only by userspace; it is a byte offset into 'entries'.
	// Userspace writes 'head' with 'smp_store_release'; the kernel must 'smp_load_acquire' it.
	uint32_t head ____cacheline_aligned_in_smp;
	// 'tail' is written only by the kernel; it is a byte offset into 'entries'
	// (published/read with the same release/acquire pairing, in the other direction).
	uint32_t tail ____cacheline_aligned_in_smp;
	// Variable-sized array of entries; each 'data_queue_entry_t' is itself variable-sized.
	data_queue_entry_t entries[] ____cacheline_aligned_in_smp;
} shared_data_queue_t;
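
/*
 * Usage sketch (not part of this header): how the reader side might consume
 * entries using the primitives named above. 'dq', 'entry_size()' and
 * 'consume()' are hypothetical names; wrap-around handling is omitted.
 *
 *	uint32_t head = dq->head;			// only the reader writes 'head'
 *	uint32_t tail = smp_load_acquire(&dq->tail);	// pairs with the writer's release
 *	while (head != tail) {
 *		data_queue_entry_t *e = (data_queue_entry_t *)((char *)dq->entries + head);
 *		consume(e);				// contents are visible after the acquire
 *		head += entry_size(e);			// offsets are in bytes
 *	}
 *	smp_store_release(&dq->head, head);		// publish consumed space to the writer
 */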

typedef struct {
	atomic_t refcount;
	wait_queue_head_t msg_wait_queue;
} transport_event_t;
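
/*
 * Sketch of the intended use (an assumption based on the field names): a reader
 * blocks on 'msg_wait_queue' until the writer publishes a message and wakes it.
 * 'data_available()' is a hypothetical predicate.
 *
 *	wait_event_interruptible(event->msg_wait_queue, data_available(transport));
 *	...
 *	wake_up_interruptible(&event->msg_wait_queue);	// writer side, after publishing
 */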

typedef struct {
	safe_kobject_t skobj;
	pid_t control_tgid;
	uint64_t events_mask;
	uint64_t events_subtype_inclusion_mask;
	uint64_t events_subtype_exclusion_mask;

	spinlock_t msg_spinlock;
	transport_event_t *event;
	bool shutdown;
	uint8_t client_type;
	ring_t msg_ring;
	// sent messages waiting for 'reply'
	set_t sent_msgs_set;
	uint32_t queue_size;
	shared_data_queue_t *queue;
	atomic_t queue_event;
	transport_id_t transport_id;

	uint64_t bytes_written;
	uint32_t insert_filled_size;
	uint32_t insert_filled_size_max;
} transport_t;

typedef struct {
	pid_t control_tgid[MAX_TRANSPORT_SIZE];
	transport_t *transports[MAX_TRANSPORT_SIZE];
} transports_t;

typedef struct {
	struct mutex transport_mutex;
	unsigned transport_count;

	uint64_t combined_events_mask;
	uint64_t combined_events_subtype_inclusion_mask;
	uint64_t combined_events_subtype_exclusion_mask;

	transports_t transports;
	atomic64_t msg_id_sequence;

	uint64_t last_transport_seq_num;
	transport_ids_t transport_ids;
} transport_global_t;

// 'module' 'init'/'down'
int transport_mod_init(void);
void transport_mod_down(void);

// 'device' 'fops'
int transport_device_open(struct inode *, struct file *);
long transport_device_ioctl(struct file *, unsigned int, unsigned long);
ssize_t transport_device_read(struct file *, char __user *, size_t, loff_t *);
ssize_t transport_device_write(struct file *, const char __user *, size_t,
			       loff_t *);
int transport_device_release(struct inode *, struct file *);
int transport_device_mmap(struct file *filp, struct vm_area_struct *vma);
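
/*
 * These handlers are presumably wired into a 'struct file_operations' for the
 * transport character device; a sketch with the standard field names (the
 * actual registration lives in the corresponding .c file):
 *
 *	static const struct file_operations transport_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= transport_device_open,
 *		.unlocked_ioctl	= transport_device_ioctl,
 *		.read		= transport_device_read,
 *		.write		= transport_device_write,
 *		.release	= transport_device_release,
 *		.mmap		= transport_device_mmap,
 *	};
 */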

extern transport_global_t transport_global;

static inline uint64_t transport_global_get_combined_mask(void)
{
	return READ_ONCE(transport_global.combined_events_mask);
}

static inline bool transport_global_subtype_needed(uint64_t subtype)
{
	if (!(READ_ONCE(transport_global.combined_events_subtype_inclusion_mask) & subtype)) {
		return false;
	}

	if (READ_ONCE(transport_global.combined_events_subtype_exclusion_mask) & subtype) {
		return false;
	}

	return true;
}
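
/*
 * Example: event producers typically consult both masks before building a
 * message, so filtered-out events are never allocated. The event and subtype
 * bits below are hypothetical:
 *
 *	if ((transport_global_get_combined_mask() & EVENT_FILE) &&
 *	    transport_global_subtype_needed(SUBTYPE_FILE_READ)) {
 *		// build and send the event message
 *	}
 */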

static inline void transport_global_get_ids(transport_ids_t *ids)
{
	int i;
	for (i = 0; i < MAX_TRANSPORT_SIZE; i++) {
		ids->ids[i] = READ_ONCE(transport_global.transport_ids.ids[i]);
	}
}

static inline bool transport_is_control_tgid(pid_t tgid)
{
	int i;
	for (i = 0; i < MAX_TRANSPORT_SIZE; i++) {
		if (READ_ONCE(transport_global.transports.control_tgid[i]) == tgid)
			return true;
	}

	return false;
}

static inline bool transport_process_belongs_to_control(struct task_struct *tsk)
{
	struct task_struct *parent;
	pid_t parent_tgid = 0;
	if (transport_is_control_tgid(tsk->tgid))
		return true;

	// Snapshot the parent's tgid under RCU; 'real_parent' may change concurrently.
	rcu_read_lock();
	parent = rcu_dereference(tsk->real_parent);
	if (parent) {
		parent_tgid = parent->tgid;
	}
	rcu_read_unlock();

	if (parent && transport_is_control_tgid(parent_tgid))
		return true;

	return false;
}

static inline msg_id_t transport_global_sequence_next(void)
{
	return 1 + atomic64_inc_return(&transport_global.msg_id_sequence);
}

struct msg_s;
void send_msg_async(struct msg_s *msg);
void send_msg_sync(struct msg_s *msg);
void send_msg_async_unref_unchecked(struct msg_s *msg);
void send_msg_sync_unref_unchecked(struct msg_s *msg);

static inline void send_msg_async_unref(struct msg_s *msg)
{
	if (msg)
		send_msg_async_unref_unchecked(msg);
}

static inline void send_msg_sync_unref(struct msg_s *msg)
{
	if (msg)
		send_msg_sync_unref_unchecked(msg);
}
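
/*
 * Usage sketch: the '_unref' wrappers are NULL-safe, so an allocation failure
 * needs no separate check. 'msg_new()' is a hypothetical constructor; judging
 * by 'sent_msgs_set', the sync variant presumably waits for a userspace reply.
 *
 *	struct msg_s *msg = msg_new(...);
 *	send_msg_sync_unref(msg);	// send, wait for the reply, drop our reference
 */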
