/**
@file     transport.h
@brief    Message transport between kernel and userspace
@details  Copyright (c) 2017 Acronis International GmbH
@author   Mikhail Krivtsov ([email protected])
@since    $Id: $
*/

#pragma once

#include "ring.h"
#include "set.h"
#include <linux/atomic.h>	// atomic_t
#include <linux/cache.h>	// ____cacheline_aligned_in_smp
#include <linux/fs.h>
#include <linux/list.h>		// struct list_head
#include <linux/mutex.h>	// struct mutex
#include <linux/spinlock.h>	// spinlock_t
#include <linux/types.h>	// bool, [u]int(8|16|32|64)_t, pid_t
#include <linux/wait.h>		// wait_queue_head_t

#include "transport_id.h"
#include "transport_protocol.h"

// This is the header of the buffer that is 'mmap'd between kernel and user space.
// Both reader & writer must use READ_ONCE/WRITE_ONCE/smp_* when accessing its contents.
typedef struct {
	// 'head' and 'tail' form a ring: the reader consumes from 'head' and advances it
	// towards 'tail'; the writer advances 'tail' to signal that more content is available.
	// In our case, kernel=writer & userspace=reader.

	// Head is written only by userspace; it is a byte offset into 'entries'.
	// Userspace must write it with 'smp_store_release' so the kernel can 'smp_load_acquire' it.
	uint32_t head ____cacheline_aligned_in_smp;
	// Tail is written only by the kernel; it is a byte offset into 'entries'.
	// The kernel writes it with 'smp_store_release'; userspace must 'smp_load_acquire' it.
	uint32_t tail ____cacheline_aligned_in_smp;
	// Variable-sized array of entries; 'data_queue_entry_t' is itself variable-sized.
	data_queue_entry_t entries[] ____cacheline_aligned_in_smp;
} shared_data_queue_t;
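
/*
 * A minimal consumption sketch for the userspace reader, given a mapped
 * 'shared_data_queue_t *q'. Wraparound handling is elided; 'consume' and
 * 'entry_size' are hypothetical helpers, and userspace would use equivalent
 * acquire/release primitives rather than the kernel macros shown here:
 *
 * @code
 * uint32_t head = q->head;			// only the reader writes 'head'
 * uint32_t tail = smp_load_acquire(&q->tail);	// pairs with the kernel's release
 * while (head != tail) {
 *	data_queue_entry_t *entry = (void *)((char *)q->entries + head);
 *	consume(entry);				// hypothetical handler
 *	head += entry_size(entry);		// hypothetical; entries are varsized
 * }
 * smp_store_release(&q->head, head);		// the kernel smp_load_acquire()s this
 * @endcode
 */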

typedef struct {
	struct list_head transport_list_node;

	pid_t control_tgid;
	uint64_t events_mask;

	// FIXME: Use 'msg_wait_queue.lock' instead of 'msg_spinlock'
	// #define msg_spinlock msg_wait_queue.lock
	spinlock_t msg_spinlock;
	wait_queue_head_t msg_wait_queue;
	bool shutdown;
	ring_t msg_ring;
	// sent messages waiting for 'reply'
	set_t sent_msgs_set;
	uint32_t queue_size;
	shared_data_queue_t *queue;
	atomic_t queue_event;
	transport_id_t transport_id;
} transport_t;
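
/*
 * A minimal sketch (with a hypothetical 'ring_push' helper; the real ring.h
 * API may differ) of how 'msg_spinlock', 'msg_ring' and 'msg_wait_queue' are
 * expected to cooperate when the kernel queues a message for a blocked reader:
 *
 * @code
 * static void transport_queue_msg(transport_t *transport, struct msg_s *msg)
 * {
 *	spin_lock(&transport->msg_spinlock);
 *	ring_push(&transport->msg_ring, msg);	// hypothetical ring.h helper
 *	spin_unlock(&transport->msg_spinlock);
 *	wake_up_interruptible(&transport->msg_wait_queue);
 * }
 * @endcode
 */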

typedef struct {
	struct mutex transport_mutex;
	unsigned transport_count;

	uint64_t combined_events_mask;

	spinlock_t transport_spinlock;
	struct list_head transport_list;
	msg_id_t msg_id_sequence;

	uint64_t last_transport_seq_num;
	transport_ids_t transport_ids;
} transport_global_t;

// 'module' 'init'/'down'
int transport_mod_init(void);
void transport_mod_down(void);

// 'device' 'fops'
int transport_device_open(struct inode *, struct file *);
long transport_device_ioctl(struct file *, unsigned int, unsigned long);
ssize_t transport_device_read(struct file *, char __user *, size_t, loff_t *);
ssize_t transport_device_write(struct file *, const char __user *, size_t,
			       loff_t *);
int transport_device_release(struct inode *, struct file *);
int transport_device_mmap(struct file *filp, struct vm_area_struct *vma);
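
/*
 * These handlers are expected to be wired into the character device's
 * 'file_operations' in the corresponding .c file; a minimal sketch:
 *
 * @code
 * static const struct file_operations transport_fops = {
 *	.owner		= THIS_MODULE,
 *	.open		= transport_device_open,
 *	.unlocked_ioctl	= transport_device_ioctl,
 *	.read		= transport_device_read,
 *	.write		= transport_device_write,
 *	.release	= transport_device_release,
 *	.mmap		= transport_device_mmap,
 * };
 * @endcode
 */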

uint64_t transport_global_get_combined_mask(void);
void transport_global_get_ids(transport_ids_t *ids);

bool transport_is_control_tgid(pid_t tgid);

msg_id_t transport_global_sequence_next(void);

struct msg_s;
void send_msg_async(struct msg_s *msg);
void send_msg_sync(struct msg_s *msg);
void send_msg_async_unref_unchecked(struct msg_s *msg);
void send_msg_sync_unref_unchecked(struct msg_s *msg);

static inline void send_msg_async_unref(struct msg_s *msg) {
	if (msg)
		send_msg_async_unref_unchecked(msg);
}
static inline void send_msg_sync_unref(struct msg_s *msg) {
	if (msg)
		send_msg_sync_unref_unchecked(msg);
}
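
/*
 * Typical call pattern (a sketch; 'msg_alloc' is a hypothetical constructor,
 * not declared in this header): build a message, send it synchronously and
 * drop the local reference in one step.
 *
 * @code
 * struct msg_s *msg = msg_alloc(...);	// hypothetical
 * send_msg_sync_unref(msg);		// safe no-op when allocation failed
 * @endcode
 */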
