msm: camera: Add spectra camera driver support

Bring over MSM camera module drivers as of msm-4.14 commit f87ce26e49
(Merge "msm: camera: icp: Send cmd buffer memory region to FW").
Additionally update timer api for camera modules.

CRs-Fixed: 2339309
Change-Id: I271474267329d4340e177e3053acaff8520314a1
Signed-off-by: Jigarkumar Zala <jzala@codeaurora.org>
This commit is contained in:
Jigarkumar Zala 2018-10-25 11:54:47 -07:00 committed by Gerrit - the friendly Code Review server
parent 42c43507e1
commit 1ec5823df7
330 changed files with 109703 additions and 1 deletion

View file

@ -137,6 +137,7 @@ source "drivers/media/platform/am437x/Kconfig"
source "drivers/media/platform/xilinx/Kconfig"
source "drivers/media/platform/rcar-vin/Kconfig"
source "drivers/media/platform/atmel/Kconfig"
source "drivers/media/platform/msm/Kconfig"
config VIDEO_TI_CAL
tristate "TI CAL (Camera Adaptation Layer) driver"
@ -641,3 +642,4 @@ config VIDEO_RCAR_DRIF
will be called rcar_drif.
endif # SDR_PLATFORM_DRIVERS
source "drivers/media/platform/msm/Kconfig"

View file

@ -94,5 +94,5 @@ obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss/
obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/
obj-y += meson/
obj-y += msm/
obj-y += cros-ec-cec/

View file

@ -1,3 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig SPECTRA_CAMERA
bool "Qualcomm Technologies, Inc. Spectra camera and video capture support"
depends on ARCH_QCOM && VIDEO_V4L2 && I2C
help
Say Y here to enable selecting the video adapters for
Qualcomm Technologies, Inc. Spectra camera and video capture.
Enabling this adds support for the camera driver stack including sensor,
IFE and postprocessing drivers.
source "drivers/media/platform/msm/sde/Kconfig"

View file

@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += sde/
obj-$(CONFIG_SPECTRA_CAMERA) += camera/

View file

@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
#
# Qualcomm Spectra camera driver stack.  Each sub-directory is only
# descended into when CONFIG_SPECTRA_CAMERA is enabled.
obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_core/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_module/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme/

View file

@ -0,0 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
#
# Header search paths for the sibling camera modules whose headers the
# CDM sources include (SMMU, utils, core, CPAS, request manager).
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr

obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm_soc.o cam_cdm_util.o cam_cdm_intf.o \
	cam_cdm_core_common.o cam_cdm_virtual_core.o \
	cam_cdm_hw_core.o

View file

@ -0,0 +1,254 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */

/*
 * Internal definitions shared by the CDM (Camera Data Mover) interface
 * layer, the virtual (SW) CDM and the real HW CDM implementations.
 */

#ifndef _CAM_CDM_H_
#define _CAM_CDM_H_

#include <linux/dma-direction.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/random.h>
#include <linux/spinlock_types.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/bug.h>

#include "cam_cdm_intf_api.h"
#include "cam_soc_util.h"
#include "cam_cpas_api.h"
#include "cam_hw_intf.h"
#include "cam_hw.h"
#include "cam_debug_util.h"

#define CAM_MAX_SW_CDM_VERSION_SUPPORTED 1
/* The virtual (SW) CDM always occupies manager node 0 */
#define CAM_SW_CDM_INDEX 0
#define CAM_CDM_INFLIGHT_WORKS 5
/* HW reset wait budget; presumably milliseconds -- TODO confirm at the
 * wait_for_completion caller
 */
#define CAM_CDM_HW_RESET_TIMEOUT 300

/*
 * Client-handle layout: bits [4:0] = per-CDM client slot index,
 * bits [8:5] = CDM HW index.  Built/decoded by the macros below.
 */
#define CAM_CDM_HW_ID_MASK 0xF
#define CAM_CDM_HW_ID_SHIFT 0x5
#define CAM_CDM_CLIENTS_ID_MASK 0x1F

/* Extract the CDM HW index from a client handle */
#define CAM_CDM_GET_HW_IDX(x) (((x) >> CAM_CDM_HW_ID_SHIFT) & \
	CAM_CDM_HW_ID_MASK)
/* Pack a HW index and a client slot index into a client handle */
#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, client_idx) \
	((((hw_idx) & CAM_CDM_HW_ID_MASK) << CAM_CDM_HW_ID_SHIFT) | \
	((client_idx) & CAM_CDM_CLIENTS_ID_MASK))
/* Extract the client slot index from a client handle */
#define CAM_CDM_GET_CLIENT_IDX(x) ((x) & CAM_CDM_CLIENTS_ID_MASK)

#define CAM_PER_CDM_MAX_REGISTERED_CLIENTS (CAM_CDM_CLIENTS_ID_MASK + 1)
#define CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM (CAM_CDM_HW_ID_MASK + 1)

/* enum cam_cdm_reg_attr - read, write, read and write permissions.*/
enum cam_cdm_reg_attr {
	CAM_REG_ATTR_READ,
	CAM_REG_ATTR_WRITE,
	CAM_REG_ATTR_READ_WRITE,
};

/* enum cam_cdm_hw_process_intf_cmd - interface commands.*/
enum cam_cdm_hw_process_intf_cmd {
	CAM_CDM_HW_INTF_CMD_ACQUIRE,
	CAM_CDM_HW_INTF_CMD_RELEASE,
	CAM_CDM_HW_INTF_CMD_SUBMIT_BL,
	CAM_CDM_HW_INTF_CMD_RESET_HW,
	CAM_CDM_HW_INTF_CMD_INVALID,
};

/* enum cam_cdm_regs - CDM driver offset enums.*/
enum cam_cdm_regs {
	/*cfg_offsets 0*/
	CDM_CFG_HW_VERSION,
	CDM_CFG_TITAN_VERSION,
	CDM_CFG_RST_CMD,
	CDM_CFG_CGC_CFG,
	CDM_CFG_CORE_CFG,
	CDM_CFG_CORE_EN,
	CDM_CFG_FE_CFG,
	/*irq_offsets 7*/
	CDM_IRQ_MASK,
	CDM_IRQ_CLEAR,
	CDM_IRQ_CLEAR_CMD,
	CDM_IRQ_SET,
	CDM_IRQ_SET_CMD,
	CDM_IRQ_STATUS,
	CDM_IRQ_USR_DATA,
	/*BL FIFO Registers 14*/
	CDM_BL_FIFO_BASE_REG,
	CDM_BL_FIFO_LEN_REG,
	CDM_BL_FIFO_STORE_REG,
	CDM_BL_FIFO_CFG,
	CDM_BL_FIFO_RB,
	CDM_BL_FIFO_BASE_RB,
	CDM_BL_FIFO_LEN_RB,
	CDM_BL_FIFO_PENDING_REQ_RB,
	/*CDM System Debug Registers 22*/
	CDM_DBG_WAIT_STATUS,
	CDM_DBG_SCRATCH_0_REG,
	CDM_DBG_SCRATCH_1_REG,
	CDM_DBG_SCRATCH_2_REG,
	CDM_DBG_SCRATCH_3_REG,
	CDM_DBG_SCRATCH_4_REG,
	CDM_DBG_SCRATCH_5_REG,
	CDM_DBG_SCRATCH_6_REG,
	CDM_DBG_SCRATCH_7_REG,
	CDM_DBG_LAST_AHB_ADDR,
	CDM_DBG_LAST_AHB_DATA,
	CDM_DBG_CORE_DBUG,
	CDM_DBG_LAST_AHB_ERR_ADDR,
	CDM_DBG_LAST_AHB_ERR_DATA,
	CDM_DBG_CURRENT_BL_BASE,
	CDM_DBG_CURRENT_BL_LEN,
	CDM_DBG_CURRENT_USED_AHB_BASE,
	CDM_DBG_DEBUG_STATUS,
	/*FE Bus Miser Registers 40*/
	CDM_BUS_MISR_CFG_0,
	CDM_BUS_MISR_CFG_1,
	CDM_BUS_MISR_RD_VAL,
	/*Performance Counter registers 43*/
	CDM_PERF_MON_CTRL,
	CDM_PERF_MON_0,
	CDM_PERF_MON_1,
	CDM_PERF_MON_2,
	/*Spare registers 47*/
	CDM_SPARE,
};

/* struct cam_cdm_reg_offset - struct for offset with attribute.*/
struct cam_cdm_reg_offset {
	uint32_t offset;
	enum cam_cdm_reg_attr attribute;
};

/* struct cam_cdm_reg_offset_table - struct for whole offset table.*/
struct cam_cdm_reg_offset_table {
	uint32_t first_offset;
	uint32_t last_offset;
	uint32_t reg_count;
	const struct cam_cdm_reg_offset *offsets;
	uint32_t offset_max_size;
};

/* enum cam_cdm_flags - Bit fields for CDM flags used */
enum cam_cdm_flags {
	CAM_CDM_FLAG_SHARED_CDM,
	CAM_CDM_FLAG_PRIVATE_CDM,
};

/* enum cam_cdm_type - Enum for possible CAM CDM types */
enum cam_cdm_type {
	CAM_VIRTUAL_CDM,
	CAM_HW_CDM,
};

/* enum cam_cdm_mem_base_index - Enum for possible CAM CDM types */
enum cam_cdm_mem_base_index {
	CAM_HW_CDM_BASE_INDEX,
	CAM_HW_CDM_MAX_INDEX = CAM_SOC_MAX_BLOCK,
};

/* struct cam_cdm_client - struct for cdm clients data.*/
struct cam_cdm_client {
	struct cam_cdm_acquire_data data;   /* copy of acquire-time data */
	void __iomem *changebase_addr;
	uint32_t stream_on;                 /* non-zero while streamed on */
	uint32_t refcount;                  /* protected by @lock */
	struct mutex lock;
	uint32_t handle;                    /* packed hw idx + slot idx */
};

/* struct cam_cdm_work_payload - struct for cdm work payload data.*/
struct cam_cdm_work_payload {
	struct cam_hw_info *hw;
	uint32_t irq_status;
	uint32_t irq_data;
	struct work_struct work;
};

/* enum cam_cdm_bl_cb_type - Enum for possible CAM CDM cb request types */
enum cam_cdm_bl_cb_type {
	CAM_HW_CDM_BL_CB_CLIENT = 1,
	CAM_HW_CDM_BL_CB_INTERNAL,
};

/* struct cam_cdm_bl_cb_request_entry - callback entry for work to process.*/
struct cam_cdm_bl_cb_request_entry {
	uint8_t bl_tag;                     /* tag of the submitted BL */
	enum cam_cdm_bl_cb_type request_type;
	uint32_t client_hdl;
	void *userdata;
	uint32_t cookie;
	struct list_head entry;
};

/* struct cam_cdm_hw_intf_cmd_submit_bl - cdm interface submit command.*/
struct cam_cdm_hw_intf_cmd_submit_bl {
	uint32_t handle;
	struct cam_cdm_bl_request *data;
};

/* struct cam_cdm_hw_mem - CDM hw memory struct */
struct cam_cdm_hw_mem {
	int32_t handle;
	uint32_t vaddr;
	uintptr_t kmdvaddr;
	size_t size;
};

/* struct cam_cdm - CDM hw device struct */
struct cam_cdm {
	uint32_t index;
	char name[128];
	enum cam_cdm_id id;
	enum cam_cdm_flags flags;
	struct completion reset_complete;
	struct completion bl_complete;
	struct workqueue_struct *work_queue;
	struct list_head bl_request_list;   /* pending BL cb entries */
	struct cam_hw_version version;
	uint32_t hw_version;
	uint32_t hw_family_version;
	struct cam_iommu_handle iommu_hdl;
	struct cam_cdm_reg_offset_table *offset_tbl;
	struct cam_cdm_utils_ops *ops;
	struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
	uint8_t bl_tag;
	atomic_t error;
	atomic_t bl_done;
	struct cam_cdm_hw_mem gen_irq;
	uint32_t cpas_handle;
};

/* struct cam_cdm_private_dt_data - CDM hw custom dt data */
struct cam_cdm_private_dt_data {
	bool dt_cdm_shared;
	uint32_t dt_num_supported_clients;
	const char *dt_cdm_client_name[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
};

/* struct cam_cdm_intf_devices - CDM mgr interface devices */
struct cam_cdm_intf_devices {
	struct mutex lock;
	uint32_t refcount;
	struct cam_hw_intf *device;
	struct cam_cdm_private_dt_data *data;
};

/* struct cam_cdm_intf_mgr - CDM mgr interface device struct */
struct cam_cdm_intf_mgr {
	bool probe_done;
	struct cam_cdm_intf_devices nodes[CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM];
	uint32_t cdm_count;
	uint32_t dt_supported_hw_cdm;
	int32_t refcount;
};

int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
	uint32_t *index);
int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
	uint32_t index);

#endif /* _CAM_CDM_H_ */

View file

@ -0,0 +1,575 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/ion.h>
#include <linux/iommu.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include "cam_soc_util.h"
#include "cam_smmu_api.h"
#include "cam_io_util.h"
#include "cam_cdm_intf_api.h"
#include "cam_cdm.h"
#include "cam_cdm_soc.h"
#include "cam_cdm_core_common.h"
/* Take a reference on @client; serialized by the client lock. */
static void cam_cdm_get_client_refcount(struct cam_cdm_client *client)
{
	mutex_lock(&client->lock);
	CAM_DBG(CAM_CDM, "CDM client get refcount=%d",
		client->refcount);
	client->refcount++;
	mutex_unlock(&client->lock);
}
/*
 * Drop a reference on @client.  An unbalanced put (refcount already
 * zero) logs an error and WARNs instead of underflowing the count.
 */
static void cam_cdm_put_client_refcount(struct cam_cdm_client *client)
{
	mutex_lock(&client->lock);
	CAM_DBG(CAM_CDM, "CDM client put refcount=%d",
		client->refcount);
	if (client->refcount > 0) {
		client->refcount--;
	} else {
		CAM_ERR(CAM_CDM, "Refcount put when zero");
		WARN_ON(1);
	}
	mutex_unlock(&client->lock);
}
/*
 * Decompose a raw CDM HW version word into @cam_version.
 *
 * NOTE(review): the fields receive masked-but-unshifted bit groups
 * (e.g. major keeps bits [31:28] in place), matching the original
 * behavior -- confirm against consumers before normalizing.
 *
 * Returns true when @ver is a supported version, false otherwise.
 */
bool cam_cdm_set_cam_hw_version(
	uint32_t ver, struct cam_hw_version *cam_version)
{
	if ((ver != CAM_CDM170_VERSION) && (ver != CAM_CDM175_VERSION)) {
		CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util", ver);
		return false;
	}

	cam_version->major = (ver & 0xF0000000);
	cam_version->minor = (ver & 0xFFF0000);
	cam_version->incr = (ver & 0xFFFF);
	cam_version->reserved = 0;

	return true;
}
/*
 * CPAS IRQ callback for the CDM.  Only logs the error type; always
 * returns false (the IRQ is not consumed here).
 */
bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
	struct cam_cpas_irq_data *irq_data)
{
	if (irq_data)
		CAM_DBG(CAM_CDM, "CPAS error callback type=%d",
			irq_data->irq_type);

	return false;
}
/*
 * Resolve the utility ops table for a CDM, either from the raw HW
 * version word (@by_cam_version == false) or from an already-decoded
 * struct cam_hw_version.  Returns NULL when the version is unsupported.
 */
struct cam_cdm_utils_ops *cam_cdm_get_ops(
	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version)
{
	if (!by_cam_version) {
		switch (ver) {
		case CAM_CDM170_VERSION:
		case CAM_CDM175_VERSION:
			return &CDM170_ops;
		default:
			CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util",
				ver);
		}
		return NULL;
	}

	if (cam_version) {
		bool is_1_0_0 = (cam_version->major == 1) &&
			(cam_version->minor == 0) &&
			(cam_version->incr == 0);
		bool is_1_1_0 = (cam_version->major == 1) &&
			(cam_version->minor == 1) &&
			(cam_version->incr == 0);

		/* 1.0.0 and 1.1.0 both map onto the CDM170 ops table */
		if (is_1_0_0 || is_1_1_0) {
			CAM_DBG(CAM_CDM,
				"cam_hw_version=%x:%x:%x supported",
				cam_version->major, cam_version->minor,
				cam_version->incr);
			return &CDM170_ops;
		}
		CAM_ERR(CAM_CDM, "cam_hw_version=%x:%x:%x not supported",
			cam_version->major, cam_version->minor,
			cam_version->incr);
	}

	return NULL;
}
/*
 * Look up the pending BL callback entry carrying @tag on @bl_list.
 * Returns the entry, or NULL (with an error log) when no entry matches.
 */
struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
	uint32_t tag, struct list_head *bl_list)
{
	struct cam_cdm_bl_cb_request_entry *req;

	list_for_each_entry(req, bl_list, entry) {
		if (req->bl_tag == tag)
			return req;
	}

	CAM_ERR(CAM_CDM, "Could not find the bl request for tag=%x", tag);
	return NULL;
}
/*
 * get_hw_caps hw op: copy the CDM's IOMMU handle into the caller's
 * buffer.  @get_hw_cap_args must point to a struct cam_iommu_handle of
 * exactly @arg_size bytes.  Returns 0 on success, -EINVAL otherwise.
 */
int cam_cdm_get_caps(void *hw_priv,
	void *get_hw_cap_args, uint32_t arg_size)
{
	struct cam_hw_info *cdm_hw = hw_priv;
	struct cam_cdm *cdm_core;

	if (!cdm_hw || !cdm_hw->core_info || !get_hw_cap_args ||
		(arg_size != sizeof(struct cam_iommu_handle)))
		return -EINVAL;

	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
	*((struct cam_iommu_handle *)get_hw_cap_args) = cdm_core->iommu_hdl;

	return 0;
}
/*
 * Return the index of the first unused client slot on @hw, or -EBUSY
 * when every slot is taken.
 */
int cam_cdm_find_free_client_slot(struct cam_cdm *hw)
{
	int slot;

	for (slot = 0; slot < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; slot++) {
		if (!hw->clients[slot]) {
			CAM_DBG(CAM_CDM, "Found client slot %d", slot);
			return slot;
		}
	}

	CAM_ERR(CAM_CDM, "No more client slots");
	return -EBUSY;
}
/*
 * cam_cdm_notify_clients() - deliver a CDM event to client callback(s).
 * @cdm_hw: CDM hw info.
 * @status: event being reported.
 * @data:   for BL_SUCCESS, the matching cam_cdm_bl_cb_request_entry;
 *          for PAGEFAULT, the fault IOVA cast to a pointer.
 *
 * BL_SUCCESS is routed to the single client encoded in the entry's
 * handle; PAGEFAULT is broadcast to every registered client.
 */
void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
	enum cam_cdm_cb_status status, void *data)
{
	int i;
	struct cam_cdm *core = NULL;
	struct cam_cdm_client *client = NULL;

	if (!cdm_hw) {
		CAM_ERR(CAM_CDM, "CDM Notify called with NULL hw info");
		return;
	}
	core = (struct cam_cdm *)cdm_hw->core_info;

	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
		int client_idx;
		struct cam_cdm_bl_cb_request_entry *node =
			(struct cam_cdm_bl_cb_request_entry *)data;

		client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl);
		client = core->clients[client_idx];
		if ((!client) || (client->handle != node->client_hdl)) {
			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
				node->client_hdl);
			return;
		}
		/* Pin the client so it cannot be released during the cb */
		cam_cdm_get_client_refcount(client);
		if (client->data.cam_cdm_callback) {
			CAM_DBG(CAM_CDM, "Calling client=%s cb cookie=%d",
				client->data.identifier, node->cookie);
			client->data.cam_cdm_callback(node->client_hdl,
				node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS,
				node->cookie);
			CAM_DBG(CAM_CDM, "Exit client cb cookie=%d",
				node->cookie);
		} else {
			CAM_ERR(CAM_CDM, "No cb registered for client hdl=%x",
				node->client_hdl);
		}
		cam_cdm_put_client_refcount(client);
		return;
	}

	/* Broadcast path: every client is notified under its own lock */
	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
		if (core->clients[i] != NULL) {
			client = core->clients[i];
			mutex_lock(&client->lock);
			CAM_DBG(CAM_CDM, "Found client slot %d", i);
			if (client->data.cam_cdm_callback) {
				if (status == CAM_CDM_CB_STATUS_PAGEFAULT) {
					unsigned long iova =
						(unsigned long)data;

					/* Only the low 32 bits of the fault
					 * address are forwarded
					 */
					client->data.cam_cdm_callback(
						client->handle,
						client->data.userdata,
						CAM_CDM_CB_STATUS_PAGEFAULT,
						(iova & 0xFFFFFFFF));
				}
			} else {
				CAM_ERR(CAM_CDM,
					"No cb registered for client hdl=%x",
					client->handle);
			}
			mutex_unlock(&client->lock);
		}
	}
}
/*
 * cam_cdm_stream_ops_internal() - common stream on/off implementation.
 * @hw_priv:    struct cam_hw_info of the CDM device.
 * @start_args: pointer to the client handle (uint32_t).
 * @operation:  true = stream on, false = stream off.
 *
 * Power state is reference counted via cdm_hw->open_count under
 * hw_mutex: the first stream-on votes CPAS bandwidth and initializes
 * the HW (virtual CDM skips HW init), the last stream-off deinits the
 * HW and removes the vote.  Returns 0 on success or a negative errno.
 */
int cam_cdm_stream_ops_internal(void *hw_priv,
	void *start_args, bool operation)
{
	struct cam_hw_info *cdm_hw = hw_priv;
	struct cam_cdm *core = NULL;
	int rc = -EPERM;
	int client_idx;
	struct cam_cdm_client *client;
	uint32_t *handle = start_args;

	if (!hw_priv)
		return -EINVAL;

	core = (struct cam_cdm *)cdm_hw->core_info;
	client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
	client = core->clients[client_idx];
	if (!client) {
		CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, *handle);
		return -EINVAL;
	}
	cam_cdm_get_client_refcount(client);
	if (*handle != client->handle) {
		CAM_ERR(CAM_CDM, "client id given handle=%x invalid", *handle);
		cam_cdm_put_client_refcount(client);
		return -EINVAL;
	}
	/* Reject redundant on/off transitions before taking hw_mutex */
	if (operation == true) {
		if (true == client->stream_on) {
			CAM_ERR(CAM_CDM,
				"Invalid CDM client is already streamed ON");
			cam_cdm_put_client_refcount(client);
			return rc;
		}
	} else {
		if (client->stream_on == false) {
			CAM_ERR(CAM_CDM,
				"Invalid CDM client is already streamed Off");
			cam_cdm_put_client_refcount(client);
			return rc;
		}
	}

	mutex_lock(&cdm_hw->hw_mutex);
	if (operation == true) {
		if (!cdm_hw->open_count) {
			/* First user: vote CPAS then bring up the HW */
			struct cam_ahb_vote ahb_vote;
			struct cam_axi_vote axi_vote;

			ahb_vote.type = CAM_VOTE_ABSOLUTE;
			ahb_vote.vote.level = CAM_SVS_VOTE;
			axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
			axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;

			rc = cam_cpas_start(core->cpas_handle,
				&ahb_vote, &axi_vote);
			if (rc != 0) {
				CAM_ERR(CAM_CDM, "CPAS start failed");
				goto end;
			}
			CAM_DBG(CAM_CDM, "CDM init first time");
			if (core->id == CAM_CDM_VIRTUAL) {
				CAM_DBG(CAM_CDM,
					"Virtual CDM HW init first time");
				rc = 0;
			} else {
				CAM_DBG(CAM_CDM, "CDM HW init first time");
				rc = cam_hw_cdm_init(hw_priv, NULL, 0);
				if (rc == 0) {
					rc = cam_hw_cdm_alloc_genirq_mem(
						hw_priv);
					if (rc != 0) {
						CAM_ERR(CAM_CDM,
							"Genirqalloc failed");
						cam_hw_cdm_deinit(hw_priv,
							NULL, 0);
					}
				} else {
					CAM_ERR(CAM_CDM, "CDM HW init failed");
				}
			}
			if (rc == 0) {
				cdm_hw->open_count++;
				client->stream_on = true;
			} else {
				/* Undo the CPAS vote on any init failure */
				if (cam_cpas_stop(core->cpas_handle))
					CAM_ERR(CAM_CDM, "CPAS stop failed");
			}
		} else {
			cdm_hw->open_count++;
			CAM_DBG(CAM_CDM, "CDM HW already ON count=%d",
				cdm_hw->open_count);
			rc = 0;
			client->stream_on = true;
		}
	} else {
		if (cdm_hw->open_count) {
			cdm_hw->open_count--;
			CAM_DBG(CAM_CDM, "stream OFF CDM %d",
				cdm_hw->open_count);
			if (!cdm_hw->open_count) {
				/* Last user: tear down HW then drop the
				 * CPAS vote
				 */
				CAM_DBG(CAM_CDM, "CDM Deinit now");
				if (core->id == CAM_CDM_VIRTUAL) {
					CAM_DBG(CAM_CDM,
						"Virtual CDM HW Deinit");
					rc = 0;
				} else {
					CAM_DBG(CAM_CDM, "CDM HW Deinit now");
					rc = cam_hw_cdm_deinit(
						hw_priv, NULL, 0);
					if (cam_hw_cdm_release_genirq_mem(
						hw_priv))
						CAM_ERR(CAM_CDM,
							"Genirq release fail");
				}
				if (rc) {
					CAM_ERR(CAM_CDM,
						"Deinit failed in streamoff");
				} else {
					client->stream_on = false;
					rc = cam_cpas_stop(core->cpas_handle);
					if (rc)
						CAM_ERR(CAM_CDM,
							"CPAS stop failed");
				}
			} else {
				client->stream_on = false;
				rc = 0;
				CAM_DBG(CAM_CDM,
					"Client stream off success =%d",
					cdm_hw->open_count);
			}
		} else {
			CAM_DBG(CAM_CDM, "stream OFF CDM Invalid %d",
				cdm_hw->open_count);
			rc = -ENXIO;
		}
	}
end:
	cam_cdm_put_client_refcount(client);
	mutex_unlock(&cdm_hw->hw_mutex);
	return rc;
}
/* Stream-on hw op: thin wrapper over cam_cdm_stream_ops_internal(). */
int cam_cdm_stream_start(void *hw_priv,
	void *start_args, uint32_t size)
{
	if (!hw_priv)
		return -EINVAL;

	return cam_cdm_stream_ops_internal(hw_priv, start_args, true);
}
/* Stream-off hw op: thin wrapper over cam_cdm_stream_ops_internal(). */
int cam_cdm_stream_stop(void *hw_priv,
	void *start_args, uint32_t size)
{
	if (!hw_priv)
		return -EINVAL;

	return cam_cdm_stream_ops_internal(hw_priv, start_args, false);
}
/*
 * cam_cdm_process_cmd() - process_cmd hw op shared by virtual and HW CDM.
 * @hw_priv:  struct cam_hw_info of the CDM device.
 * @cmd:      one of enum cam_cdm_hw_process_intf_cmd.
 * @cmd_args: command-specific payload (see cases below).
 * @arg_size: size of @cmd_args, validated per command.
 *
 * Dispatches SUBMIT_BL / ACQUIRE / RELEASE / RESET_HW requests from the
 * CDM interface layer.  Returns 0 on success or a negative errno.
 */
int cam_cdm_process_cmd(void *hw_priv,
	uint32_t cmd, void *cmd_args, uint32_t arg_size)
{
	struct cam_hw_info *cdm_hw = hw_priv;
	struct cam_hw_soc_info *soc_data = NULL;
	struct cam_cdm *core = NULL;
	int rc = -EINVAL;

	if ((!hw_priv) || (!cmd_args) ||
		(cmd >= CAM_CDM_HW_INTF_CMD_INVALID))
		return rc;

	soc_data = &cdm_hw->soc_info;
	core = (struct cam_cdm *)cdm_hw->core_info;
	switch (cmd) {
	case CAM_CDM_HW_INTF_CMD_SUBMIT_BL: {
		struct cam_cdm_hw_intf_cmd_submit_bl *req;
		int idx;
		struct cam_cdm_client *client;

		if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) {
			CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
				arg_size);
			break;
		}
		req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args;
		if ((req->data->type < 0) ||
			(req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) {
			CAM_ERR(CAM_CDM, "Invalid req bl cmd addr type=%d",
				req->data->type);
			break;
		}
		idx = CAM_CDM_GET_CLIENT_IDX(req->handle);
		client = core->clients[idx];
		if ((!client) || (req->handle != client->handle)) {
			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
				req->handle);
			break;
		}
		cam_cdm_get_client_refcount(client);
		/* A completion cb was requested but none was registered */
		if ((req->data->flag == true) &&
			(!client->data.cam_cdm_callback)) {
			CAM_ERR(CAM_CDM,
				"CDM request cb without registering cb");
			cam_cdm_put_client_refcount(client);
			break;
		}
		if (client->stream_on != true) {
			CAM_ERR(CAM_CDM,
				"Invalid CDM needs to be streamed ON first");
			cam_cdm_put_client_refcount(client);
			break;
		}
		if (core->id == CAM_CDM_VIRTUAL)
			rc = cam_virtual_cdm_submit_bl(cdm_hw, req, client);
		else
			rc = cam_hw_cdm_submit_bl(cdm_hw, req, client);
		cam_cdm_put_client_refcount(client);
		break;
	}
	case CAM_CDM_HW_INTF_CMD_ACQUIRE: {
		struct cam_cdm_acquire_data *data;
		int idx;
		struct cam_cdm_client *client;

		if (sizeof(struct cam_cdm_acquire_data) != arg_size) {
			CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
				arg_size);
			break;
		}

		/* hw_mutex guards slot selection and allocation */
		mutex_lock(&cdm_hw->hw_mutex);
		data = (struct cam_cdm_acquire_data *)cmd_args;
		CAM_DBG(CAM_CDM, "Trying to acquire client=%s in hw idx=%d",
			data->identifier, core->index);
		idx = cam_cdm_find_free_client_slot(core);
		if ((idx < 0) || (core->clients[idx])) {
			mutex_unlock(&cdm_hw->hw_mutex);
			CAM_ERR(CAM_CDM,
				"Fail to client slots, client=%s in hw idx=%d",
				data->identifier, core->index);
			break;
		}
		core->clients[idx] = kzalloc(sizeof(struct cam_cdm_client),
			GFP_KERNEL);
		if (!core->clients[idx]) {
			mutex_unlock(&cdm_hw->hw_mutex);
			rc = -ENOMEM;
			break;
		}

		mutex_unlock(&cdm_hw->hw_mutex);
		client = core->clients[idx];
		mutex_init(&client->lock);
		data->ops = core->ops;
		if (core->id == CAM_CDM_VIRTUAL) {
			/* Virtual CDM reports a fixed 1.0.0 version */
			data->cdm_version.major = 1;
			data->cdm_version.minor = 0;
			data->cdm_version.incr = 0;
			data->cdm_version.reserved = 0;
			data->ops = cam_cdm_get_ops(0,
				&data->cdm_version, true);
			if (!data->ops) {
				mutex_destroy(&client->lock);
				mutex_lock(&cdm_hw->hw_mutex);
				kfree(core->clients[idx]);
				core->clients[idx] = NULL;
				mutex_unlock(
					&cdm_hw->hw_mutex);
				rc = -EPERM;
				CAM_ERR(CAM_CDM, "Invalid ops for virtual cdm");
				break;
			}
		} else {
			data->cdm_version = core->version;
		}

		/* This reference is held until CMD_RELEASE drops it */
		cam_cdm_get_client_refcount(client);
		mutex_lock(&client->lock);
		memcpy(&client->data, data,
			sizeof(struct cam_cdm_acquire_data));
		/* Handle packs hw index + client slot (see cam_cdm.h) */
		client->handle = CAM_CDM_CREATE_CLIENT_HANDLE(
			core->index,
			idx);
		client->stream_on = false;
		data->handle = client->handle;
		CAM_DBG(CAM_CDM, "Acquired client=%s in hwidx=%d",
			data->identifier, core->index);
		mutex_unlock(&client->lock);
		rc = 0;
		break;
	}
	case CAM_CDM_HW_INTF_CMD_RELEASE: {
		uint32_t *handle = cmd_args;
		int idx;
		struct cam_cdm_client *client;

		if (sizeof(uint32_t) != arg_size) {
			CAM_ERR(CAM_CDM,
				"Invalid CDM cmd %d size=%x for handle=%x",
				cmd, arg_size, *handle);
			return -EINVAL;
		}
		idx = CAM_CDM_GET_CLIENT_IDX(*handle);
		mutex_lock(&cdm_hw->hw_mutex);
		client = core->clients[idx];
		if ((!client) || (*handle != client->handle)) {
			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x",
				client, *handle);
			mutex_unlock(&cdm_hw->hw_mutex);
			break;
		}
		/* Drop the reference taken at acquire time */
		cam_cdm_put_client_refcount(client);
		mutex_lock(&client->lock);
		if (client->refcount != 0) {
			/* Still in use elsewhere; refuse to free */
			CAM_ERR(CAM_CDM, "CDM Client refcount not zero %d",
				client->refcount);
			rc = -EPERM;
			mutex_unlock(&client->lock);
			mutex_unlock(&cdm_hw->hw_mutex);
			break;
		}
		core->clients[idx] = NULL;
		mutex_unlock(&client->lock);
		mutex_destroy(&client->lock);
		kfree(client);
		mutex_unlock(&cdm_hw->hw_mutex);
		rc = 0;
		break;
	}
	case CAM_CDM_HW_INTF_CMD_RESET_HW: {
		/* Not implemented for this CDM; report and fall out */
		CAM_ERR(CAM_CDM, "CDM HW reset not supported for handle =%x",
			*((uint32_t *)cmd_args));
		break;
	}
	default:
		CAM_ERR(CAM_CDM, "CDM HW intf command not valid =%d", cmd);
		break;
	}
	return rc;
}

View file

@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */

/*
 * Common CDM core helpers shared between the virtual (SW) CDM and the
 * HW CDM implementations.
 */

#ifndef _CAM_CDM_CORE_COMMON_H_
#define _CAM_CDM_CORE_COMMON_H_

#include "cam_mem_mgr.h"

/* Raw HW version register values recognized by this driver */
#define CAM_CDM170_VERSION 0x10000000
#define CAM_CDM175_VERSION 0x10010000

/* Utility ops table shared by all supported CDM versions */
extern struct cam_cdm_utils_ops CDM170_ops;

/* HW CDM bring-up/teardown and gen-irq memory management */
int cam_hw_cdm_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
int cam_hw_cdm_deinit(void *hw_priv, void *init_hw_args, uint32_t arg_size);
int cam_hw_cdm_alloc_genirq_mem(void *hw_priv);
int cam_hw_cdm_release_genirq_mem(void *hw_priv);

/* Shared cam_hw_ops backends used by both virtual and HW CDM */
int cam_cdm_get_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size);
int cam_cdm_stream_ops_internal(void *hw_priv, void *start_args,
	bool operation);
int cam_cdm_stream_start(void *hw_priv, void *start_args, uint32_t size);
int cam_cdm_stream_stop(void *hw_priv, void *start_args, uint32_t size);
int cam_cdm_process_cmd(void *hw_priv, uint32_t cmd, void *cmd_args,
	uint32_t arg_size);

/* Version decode / ops lookup helpers */
bool cam_cdm_set_cam_hw_version(
	uint32_t ver, struct cam_hw_version *cam_version);
bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
	struct cam_cpas_irq_data *irq_data);
struct cam_cdm_utils_ops *cam_cdm_get_ops(
	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);

/* BL submission backends */
int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
	struct cam_cdm_hw_intf_cmd_submit_bl *req,
	struct cam_cdm_client *client);
int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
	struct cam_cdm_hw_intf_cmd_submit_bl *req,
	struct cam_cdm_client *client);

struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
	uint32_t tag, struct list_head *bl_list);
void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
	enum cam_cdm_cb_status status, void *data);

#endif /* _CAM_CDM_CORE_COMMON_H_ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,575 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/ion.h>
#include <linux/iommu.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include "cam_cdm_intf_api.h"
#include "cam_cdm.h"
#include "cam_cdm_virtual.h"
#include "cam_soc_util.h"
#include "cam_cdm_soc.h"
static struct cam_cdm_intf_mgr cdm_mgr;
static DEFINE_MUTEX(cam_cdm_mgr_lock);
static const struct of_device_id msm_cam_cdm_intf_dt_match[] = {
{ .compatible = "qcom,cam-cdm-intf", },
{}
};
/*
 * Take a reference on the global CDM interface manager.  Fails with
 * -EPERM until the interface device has probed.
 */
static int get_cdm_mgr_refcount(void)
{
	int rc = 0;

	mutex_lock(&cam_cdm_mgr_lock);
	if (cdm_mgr.probe_done == false) {
		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
		rc = -EPERM;
	} else {
		CAM_DBG(CAM_CDM, "CDM intf mgr get refcount=%d",
			cdm_mgr.refcount);
		cdm_mgr.refcount++;
	}
	mutex_unlock(&cam_cdm_mgr_lock);
	return rc;
}
/*
 * Drop a reference on the global CDM interface manager.  Unbalanced
 * puts (refcount already zero) log and WARN instead of underflowing.
 */
static void put_cdm_mgr_refcount(void)
{
	mutex_lock(&cam_cdm_mgr_lock);
	if (cdm_mgr.probe_done == false) {
		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
	} else {
		CAM_DBG(CAM_CDM, "CDM intf mgr put refcount=%d",
			cdm_mgr.refcount);
		if (cdm_mgr.refcount > 0) {
			cdm_mgr.refcount--;
		} else {
			CAM_ERR(CAM_CDM, "Refcount put when zero");
			WARN_ON(1);
		}
	}
	mutex_unlock(&cam_cdm_mgr_lock);
}
/*
 * Fetch the IOMMU handle of the CDM at manager node @hw_idx through its
 * get_hw_caps op.  Returns the op's result, or -EPERM when the node has
 * no get_hw_caps op.
 */
static int get_cdm_iommu_handle(struct cam_iommu_handle *cdm_handles,
	uint32_t hw_idx)
{
	struct cam_hw_intf *hw = cdm_mgr.nodes[hw_idx].device;

	if (!hw->hw_ops.get_hw_caps)
		return -EPERM;

	return hw->hw_ops.get_hw_caps(hw->hw_priv, cdm_handles,
		sizeof(struct cam_iommu_handle));
}
/*
 * Map a client identifier string to the index of the CDM node whose DT
 * client-name list contains it.  @cell_index is only used in the debug
 * log here.  Returns 0 and sets *hw_index on a match, -EPERM otherwise.
 */
static int get_cdm_index_by_id(char *identifier,
	uint32_t cell_index, uint32_t *hw_index)
{
	int rc = -EPERM, i, j;
	char client_name[128];

	CAM_DBG(CAM_CDM, "Looking for HW id of =%s and index=%d",
		identifier, cell_index);
	snprintf(client_name, sizeof(client_name), "%s", identifier);
	CAM_DBG(CAM_CDM, "Looking for HW id of %s count:%d", client_name,
		cdm_mgr.cdm_count);
	mutex_lock(&cam_cdm_mgr_lock);
	for (i = 0; i < cdm_mgr.cdm_count; i++) {
		mutex_lock(&cdm_mgr.nodes[i].lock);
		CAM_DBG(CAM_CDM, "dt_num_supported_clients=%d",
			cdm_mgr.nodes[i].data->dt_num_supported_clients);
		for (j = 0; j <
			cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) {
			CAM_DBG(CAM_CDM, "client name:%s",
				cdm_mgr.nodes[i].data->dt_cdm_client_name[j]);
			if (!strcmp(
				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
				client_name)) {
				rc = 0;
				*hw_index = i;
				break;
			}
		}
		mutex_unlock(&cdm_mgr.nodes[i].lock);
		if (rc == 0)
			break;
	}
	mutex_unlock(&cam_cdm_mgr_lock);
	return rc;
}
/*
 * cam_cdm_get_iommu_handle() - public API: look up the IOMMU handle of
 * the CDM that lists @identifier among its DT client names and copy it
 * into @cdm_handles.  Returns 0 on success or a negative errno.
 */
int cam_cdm_get_iommu_handle(char *identifier,
	struct cam_iommu_handle *cdm_handles)
{
	int i, j, rc = -EPERM;

	if ((!identifier) || (!cdm_handles))
		return -EINVAL;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return rc;
	}
	CAM_DBG(CAM_CDM, "Looking for Iommu handle of %s", identifier);
	for (i = 0; i < cdm_mgr.cdm_count; i++) {
		mutex_lock(&cdm_mgr.nodes[i].lock);
		if (!cdm_mgr.nodes[i].data) {
			mutex_unlock(&cdm_mgr.nodes[i].lock);
			continue;
		}
		for (j = 0; j <
			cdm_mgr.nodes[i].data->dt_num_supported_clients;
			j++) {
			if (!strcmp(
				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
				identifier)) {
				rc = get_cdm_iommu_handle(cdm_handles, i);
				break;
			}
		}
		mutex_unlock(&cdm_mgr.nodes[i].lock);
		if (rc == 0)
			break;
	}
	put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_get_iommu_handle);
/*
 * cam_cdm_acquire() - public API: acquire a CDM client handle.
 * @data: acquire parameters; on success data->handle and data->ops are
 *        filled in by the underlying CDM's ACQUIRE process_cmd.
 *
 * The manager reference taken here is kept while the acquisition is
 * held; it is dropped only on failure (the matching put for the success
 * path happens in cam_cdm_release()).  Returns 0 on success or a
 * negative errno.
 *
 * Fix vs. original: the lookup-failure check used
 * "(rc < 0) && (hw_index < MAX)", which would fall through into the
 * success path (indexing cdm_mgr.nodes[] with an unvalidated index) if
 * rc < 0 ever coincided with hw_index >= MAX.  Failure and range are
 * now checked independently.
 */
int cam_cdm_acquire(struct cam_cdm_acquire_data *data)
{
	int rc = -EPERM;
	struct cam_hw_intf *hw;
	uint32_t hw_index = 0;

	if ((!data) || (!data->base_array_cnt))
		return -EINVAL;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return rc;
	}

	if (data->id > CAM_CDM_HW_ANY) {
		CAM_ERR(CAM_CDM,
			"only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported");
		rc = -EPERM;
		goto end;
	}

	rc = get_cdm_index_by_id(data->identifier, data->cell_index,
		&hw_index);
	if (rc < 0) {
		CAM_ERR(CAM_CDM, "Failed to identify associated hw id");
		goto end;
	}
	if (hw_index >= CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
		CAM_ERR(CAM_CDM, "Failed to identify associated hw id");
		rc = -EPERM;
		goto end;
	}

	CAM_DBG(CAM_CDM, "hw_index:%d", hw_index);
	hw = cdm_mgr.nodes[hw_index].device;
	if (hw && hw->hw_ops.process_cmd) {
		rc = hw->hw_ops.process_cmd(hw->hw_priv,
			CAM_CDM_HW_INTF_CMD_ACQUIRE, data,
			sizeof(struct cam_cdm_acquire_data));
		if (rc < 0) {
			CAM_ERR(CAM_CDM, "CDM hw acquire failed");
			goto end;
		}
	} else {
		CAM_ERR(CAM_CDM, "idx %d doesn't have acquire ops",
			hw_index);
		rc = -EPERM;
	}

end:
	if (rc < 0) {
		CAM_ERR(CAM_CDM, "CDM acquire failed for id=%d name=%s, idx=%d",
			data->id, data->identifier, data->cell_index);
		put_cdm_mgr_refcount();
	}
	return rc;
}
EXPORT_SYMBOL(cam_cdm_acquire);
/*
 * cam_cdm_release() - public API: release a client handle obtained from
 * cam_cdm_acquire().  On success the manager refcount is put twice:
 * once to balance this function's own get, and once to balance the get
 * that cam_cdm_acquire() left held for the lifetime of the handle.
 */
int cam_cdm_release(uint32_t handle)
{
	uint32_t hw_index;
	int rc = -EPERM;
	struct cam_hw_intf *hw;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return rc;
	}
	hw_index = CAM_CDM_GET_HW_IDX(handle);
	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
		hw = cdm_mgr.nodes[hw_index].device;
		if (hw && hw->hw_ops.process_cmd) {
			rc = hw->hw_ops.process_cmd(hw->hw_priv,
				CAM_CDM_HW_INTF_CMD_RELEASE, &handle,
				sizeof(handle));
			if (rc < 0)
				CAM_ERR(CAM_CDM,
					"hw release failed for handle=%x",
					handle);
		} else
			CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
				hw_index);
	}
	put_cdm_mgr_refcount();
	if (rc == 0)
		put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_release);
/*
 * cam_cdm_submit_bls() - public API: submit a batch of BLs on @handle.
 * The request is forwarded to the owning CDM's SUBMIT_BL process_cmd.
 * Returns 0 on success or a negative errno.
 */
int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data)
{
	uint32_t hw_index;
	int rc = -EINVAL;
	struct cam_hw_intf *hw;

	if (!data)
		return rc;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		rc = -EPERM;
		return rc;
	}
	hw_index = CAM_CDM_GET_HW_IDX(handle);
	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
		struct cam_cdm_hw_intf_cmd_submit_bl req;

		hw = cdm_mgr.nodes[hw_index].device;
		if (hw && hw->hw_ops.process_cmd) {
			req.data = data;
			req.handle = handle;
			rc = hw->hw_ops.process_cmd(hw->hw_priv,
				CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req,
				sizeof(struct cam_cdm_hw_intf_cmd_submit_bl));
			if (rc < 0)
				CAM_ERR(CAM_CDM,
					"hw submit bl failed for handle=%x",
					handle);
		} else {
			CAM_ERR(CAM_CDM, "hw idx %d doesn't have submit ops",
				hw_index);
		}
	}
	put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_submit_bls);
/*
 * cam_cdm_stream_on() - public API: stream the client's CDM on through
 * the owning device's start op.  Returns 0 on success or a negative
 * errno.
 */
int cam_cdm_stream_on(uint32_t handle)
{
	struct cam_hw_intf *hw;
	uint32_t hw_index;
	int rc = -EINVAL;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return -EPERM;
	}

	hw_index = CAM_CDM_GET_HW_IDX(handle);
	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
		hw = cdm_mgr.nodes[hw_index].device;
		if (!hw || !hw->hw_ops.start) {
			CAM_ERR(CAM_CDM,
				"hw idx %d doesn't have start ops",
				hw_index);
		} else {
			rc = hw->hw_ops.start(hw->hw_priv, &handle,
				sizeof(uint32_t));
			if (rc < 0)
				CAM_ERR(CAM_CDM,
					"hw start failed handle=%x",
					handle);
		}
	}

	put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_stream_on);
/*
 * cam_cdm_stream_off() - public API: stream the client's CDM off
 * through the owning device's stop op.  Returns 0 on success or a
 * negative errno.
 */
int cam_cdm_stream_off(uint32_t handle)
{
	uint32_t hw_index;
	int rc = -EINVAL;
	struct cam_hw_intf *hw;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		rc = -EPERM;
		return rc;
	}
	hw_index = CAM_CDM_GET_HW_IDX(handle);
	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
		hw = cdm_mgr.nodes[hw_index].device;
		if (hw && hw->hw_ops.stop) {
			rc = hw->hw_ops.stop(hw->hw_priv, &handle,
				sizeof(uint32_t));
			if (rc < 0)
				CAM_ERR(CAM_CDM, "hw stop failed handle=%x",
					handle);
		} else {
			CAM_ERR(CAM_CDM, "hw idx %d doesn't have stop ops",
				hw_index);
		}
	}
	put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_stream_off);
/*
 * cam_cdm_reset_hw() - public API: request a HW reset of the client's
 * CDM via the RESET_HW process_cmd.  Returns 0 on success or a negative
 * errno (the common-core backend currently reports reset as
 * unsupported).
 *
 * Fix vs. original: the error logs said "release" instead of "reset" --
 * copy-paste residue from cam_cdm_release() that made failures here
 * indistinguishable from release failures in the log.
 */
int cam_cdm_reset_hw(uint32_t handle)
{
	uint32_t hw_index;
	int rc = -EINVAL;
	struct cam_hw_intf *hw;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		rc = -EPERM;
		return rc;
	}
	hw_index = CAM_CDM_GET_HW_IDX(handle);
	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
		hw = cdm_mgr.nodes[hw_index].device;
		if (hw && hw->hw_ops.process_cmd) {
			rc = hw->hw_ops.process_cmd(hw->hw_priv,
				CAM_CDM_HW_INTF_CMD_RESET_HW, &handle,
				sizeof(handle));
			if (rc < 0)
				CAM_ERR(CAM_CDM,
					"CDM hw reset failed for handle=%x",
					handle);
		} else {
			CAM_ERR(CAM_CDM, "hw idx %d doesn't have reset ops",
				hw_index);
		}
	}
	put_cdm_mgr_refcount();
	return rc;
}
EXPORT_SYMBOL(cam_cdm_reset_hw);
/*
 * cam_cdm_intf_register_hw_cdm() - register a CDM device with the intf mgr.
 * @hw:    HW interface of the CDM being registered
 * @data:  private DT data parsed for this CDM
 * @type:  CAM_VIRTUAL_CDM or CAM_HW_CDM
 * @index: out param; slot assigned in cdm_mgr.nodes[] (current cdm_count)
 *
 * A virtual CDM occupies the fixed CAM_SW_CDM_INDEX slot and may only be
 * registered once; a HW CDM takes the next free slot and requires that
 * something (the virtual CDM) was registered first (cdm_count > 0).
 *
 * Return: 0 on success, -EINVAL on bad args or registration failure.
 */
int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
uint32_t *index)
{
int rc = -EINVAL;
if ((!hw) || (!data) || (!index))
return rc;
if (get_cdm_mgr_refcount()) {
CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
return rc;
}
mutex_lock(&cam_cdm_mgr_lock);
/* Virtual CDM goes to the dedicated SW slot, but only if empty. */
if ((type == CAM_VIRTUAL_CDM) &&
(!cdm_mgr.nodes[CAM_SW_CDM_INDEX].device)) {
mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = hw;
cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = data;
*index = cdm_mgr.cdm_count;
mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
cdm_mgr.cdm_count++;
rc = 0;
} else if ((type == CAM_HW_CDM) && (cdm_mgr.cdm_count > 0)) {
/* HW CDM takes the next free slot (cdm_count). */
mutex_lock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
cdm_mgr.nodes[cdm_mgr.cdm_count].device = hw;
cdm_mgr.nodes[cdm_mgr.cdm_count].data = data;
*index = cdm_mgr.cdm_count;
mutex_unlock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
cdm_mgr.cdm_count++;
rc = 0;
} else {
CAM_ERR(CAM_CDM, "CDM registration failed type=%d count=%d",
type, cdm_mgr.cdm_count);
}
mutex_unlock(&cam_cdm_mgr_lock);
put_cdm_mgr_refcount();
return rc;
}
/*
 * cam_cdm_intf_deregister_hw_cdm() - remove a CDM device from the intf mgr.
 * @hw:    HW interface previously registered
 * @data:  private DT data given at registration (validated non-NULL only)
 * @type:  CAM_VIRTUAL_CDM or CAM_HW_CDM
 * @index: slot index returned by registration
 *
 * Fixes: the virtual-CDM branch locked
 * cdm_mgr.nodes[cdm_mgr.cdm_count].lock while clearing
 * nodes[CAM_SW_CDM_INDEX] — it now locks the node it actually modifies.
 * The virtual branch also decrements cdm_count, mirroring the increment
 * performed by cam_cdm_intf_register_hw_cdm() for the virtual CDM.
 *
 * Return: 0 on success, -EPERM if the manager refcount cannot be taken,
 * -EINVAL on bad args or lookup mismatch.
 */
int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
	uint32_t index)
{
	int rc = -EINVAL;

	if ((!hw) || (!data))
		return rc;
	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		rc = -EPERM;
		return rc;
	}
	mutex_lock(&cam_cdm_mgr_lock);
	if ((type == CAM_VIRTUAL_CDM) &&
		(hw == cdm_mgr.nodes[CAM_SW_CDM_INDEX].device) &&
		(index == CAM_SW_CDM_INDEX)) {
		/* Lock the SW node being cleared, not nodes[cdm_count]. */
		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = NULL;
		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = NULL;
		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
		cdm_mgr.cdm_count--;
		rc = 0;
	} else if ((type == CAM_HW_CDM) &&
		(hw == cdm_mgr.nodes[index].device)) {
		mutex_lock(&cdm_mgr.nodes[index].lock);
		cdm_mgr.nodes[index].device = NULL;
		cdm_mgr.nodes[index].data = NULL;
		mutex_unlock(&cdm_mgr.nodes[index].lock);
		cdm_mgr.cdm_count--;
		rc = 0;
	} else {
		CAM_ERR(CAM_CDM, "CDM Deregistration failed type=%d index=%d",
			type, index);
	}
	mutex_unlock(&cam_cdm_mgr_lock);
	put_cdm_mgr_refcount();
	return rc;
}
/*
 * cam_cdm_intf_probe() - platform probe for the CDM interface manager.
 * @pdev: interface-manager platform device
 *
 * Reads manager DT properties, initialises every node slot (mutex and
 * cleared device/data/refcount), marks probe_done, then probes the
 * virtual CDM. On virtual-CDM probe failure all node state is torn down
 * again and probe_done is cleared.
 *
 * Return: 0 on success, negative errno from DT parse or virtual probe.
 */
static int cam_cdm_intf_probe(struct platform_device *pdev)
{
int i, rc;
rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr);
if (rc) {
CAM_ERR(CAM_CDM, "Failed to get dt properties");
return rc;
}
mutex_lock(&cam_cdm_mgr_lock);
for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
mutex_init(&cdm_mgr.nodes[i].lock);
cdm_mgr.nodes[i].device = NULL;
cdm_mgr.nodes[i].data = NULL;
cdm_mgr.nodes[i].refcount = 0;
}
cdm_mgr.probe_done = true;
cdm_mgr.refcount = 0;
mutex_unlock(&cam_cdm_mgr_lock);
rc = cam_virtual_cdm_probe(pdev);
if (rc) {
/* Roll back: destroy node mutexes and clear manager state. */
mutex_lock(&cam_cdm_mgr_lock);
cdm_mgr.probe_done = false;
for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
(cdm_mgr.nodes[i].refcount != 0))
CAM_ERR(CAM_CDM,
"Valid node present in index=%d", i);
mutex_destroy(&cdm_mgr.nodes[i].lock);
cdm_mgr.nodes[i].device = NULL;
cdm_mgr.nodes[i].data = NULL;
cdm_mgr.nodes[i].refcount = 0;
}
mutex_unlock(&cam_cdm_mgr_lock);
}
CAM_DBG(CAM_CDM, "CDM Intf probe done");
return rc;
}
/*
 * cam_cdm_intf_remove() - platform remove for the CDM interface manager.
 * @pdev: interface-manager platform device
 *
 * Removes the virtual CDM, then (with the manager lock held) verifies no
 * client refcounts or registered nodes remain before destroying the node
 * mutexes and clearing probe_done.
 *
 * Fixes:
 *  - "goto end" after a virtual-CDM remove failure jumped to
 *    mutex_unlock() before the mutex was taken (unlock of unheld mutex)
 *    and also skipped put_cdm_mgr_refcount();
 *  - the valid-node error path unlocked cam_cdm_mgr_lock and then jumped
 *    to "end", unlocking it a second time.
 *
 * Return: 0 on success, -EBUSY if the manager is still in use.
 */
static int cam_cdm_intf_remove(struct platform_device *pdev)
{
	int i, rc = -EBUSY;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return rc;
	}
	if (cam_virtual_cdm_remove(pdev)) {
		CAM_ERR(CAM_CDM, "Virtual CDM remove failed");
		put_cdm_mgr_refcount();
		return rc;
	}
	put_cdm_mgr_refcount();

	mutex_lock(&cam_cdm_mgr_lock);
	if (cdm_mgr.refcount != 0) {
		CAM_ERR(CAM_CDM, "cdm manger refcount not zero %d",
			cdm_mgr.refcount);
		goto end;
	}
	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
		if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
			(cdm_mgr.nodes[i].refcount != 0)) {
			CAM_ERR(CAM_CDM, "Valid node present in index=%d", i);
			goto end;
		}
		mutex_destroy(&cdm_mgr.nodes[i].lock);
		cdm_mgr.nodes[i].device = NULL;
		cdm_mgr.nodes[i].data = NULL;
		cdm_mgr.nodes[i].refcount = 0;
	}
	cdm_mgr.probe_done = false;
	rc = 0;
end:
	mutex_unlock(&cam_cdm_mgr_lock);
	return rc;
}
/* Platform driver binding for the CDM interface manager device. */
static struct platform_driver cam_cdm_intf_driver = {
.probe = cam_cdm_intf_probe,
.remove = cam_cdm_intf_remove,
.driver = {
.name = "msm_cam_cdm_intf",
.owner = THIS_MODULE,
.of_match_table = msm_cam_cdm_intf_dt_match,
.suppress_bind_attrs = true,
},
};
/* Module entry: register the CDM interface platform driver. */
static int __init cam_cdm_intf_init_module(void)
{
return platform_driver_register(&cam_cdm_intf_driver);
}
/* Module exit: unregister the CDM interface platform driver. */
static void __exit cam_cdm_intf_exit_module(void)
{
platform_driver_unregister(&cam_cdm_intf_driver);
}
module_init(cam_cdm_intf_init_module);
module_exit(cam_cdm_intf_exit_module);
MODULE_DESCRIPTION("MSM Camera CDM Intf driver");
MODULE_LICENSE("GPL v2");

View file

@ -0,0 +1,202 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CDM_API_H_
#define _CAM_CDM_API_H_
#include <media/cam_defs.h>
#include "cam_cdm_util.h"
#include "cam_soc_util.h"
/* enum cam_cdm_id - Enum identifying the possible CAM CDM hardware blocks */
enum cam_cdm_id {
CAM_CDM_VIRTUAL,
CAM_CDM_HW_ANY,
CAM_CDM_CPAS_0,
CAM_CDM_IPE0,
CAM_CDM_IPE1,
CAM_CDM_BPS,
CAM_CDM_VFE,
CAM_CDM_MAX
};
/* enum cam_cdm_cb_status - Enum for possible CAM CDM callback statuses */
enum cam_cdm_cb_status {
CAM_CDM_CB_STATUS_BL_SUCCESS,
CAM_CDM_CB_STATUS_INVALID_BL_CMD,
CAM_CDM_CB_STATUS_PAGEFAULT,
CAM_CDM_CB_STATUS_HW_RESET_ONGOING,
CAM_CDM_CB_STATUS_HW_RESET_DONE,
CAM_CDM_CB_STATUS_UNKNOWN_ERROR,
};
/* enum cam_cdm_bl_cmd_addr_type - Enum for possible CDM bl cmd addr types */
enum cam_cdm_bl_cmd_addr_type {
CAM_CDM_BL_CMD_TYPE_MEM_HANDLE,
CAM_CDM_BL_CMD_TYPE_HW_IOVA,
CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA,
};
/**
 * struct cam_cdm_acquire_data - Cam CDM acquire data structure
 *
 * @identifier       : Input identifier string, the device label from DT
 *                     (e.g. vfe, ife, jpeg)
 * @cell_index       : Input integer cell index from DT for the device;
 *                     combined with @identifier it forms a unique name
 *                     (vfe0, ife1, jpeg0, ...)
 * @id               : ID of a specific (or any) CDM HW to be acquired
 * @userdata         : Input private data returned as part of the callback
 * @cam_cdm_callback : Input callback invoked by the CDM driver; receives
 *                     the client handle, @userdata, the callback status
 *                     and a cookie (set for gen-irq status callbacks)
 * @base_array_cnt   : Input number of ioremapped address pairs in
 *                     @base_array; needed only if the selected CDM is
 *                     virtual
 * @base_array       : Input pointer to the ioremapped address pair array;
 *                     needed only if the selected CDM is virtual
 * @cdm_version      : Output while acquiring a HW CDM; input while
 *                     acquiring a virtual CDM
 * @ops              : Output pointer, set by the CDM driver, to the CDM
 *                     util ops for the acquired CDM HW version
 * @handle           : Output unique handle generated for this acquire
 */
struct cam_cdm_acquire_data {
char identifier[128];
uint32_t cell_index;
enum cam_cdm_id id;
void *userdata;
void (*cam_cdm_callback)(uint32_t handle, void *userdata,
enum cam_cdm_cb_status status, uint64_t cookie);
uint32_t base_array_cnt;
struct cam_soc_reg_map *base_array[CAM_SOC_MAX_BLOCK];
struct cam_hw_version cdm_version;
struct cam_cdm_utils_ops *ops;
uint32_t handle;
};
/**
 * struct cam_cdm_bl_cmd - Cam CDM HW bl command
 *
 * @bl_addr : Union of the three address types for CDM BL commands
 * @offset  : Input offset of the actual bl cmd in the memory pointed
 *            to by mem_handle
 * @len     : Input length of the BL command; cannot be more than 1MB
 *            and is validated against offset+size of the memory pointed
 *            to by mem_handle
 */
struct cam_cdm_bl_cmd {
union {
int32_t mem_handle;
uint32_t *hw_iova;
uintptr_t kernel_iova;
} bl_addr;
uint32_t offset;
uint32_t len;
};
/**
 * struct cam_cdm_bl_request - Cam CDM HW base & length (BL) request
 *
 * @flag             : 1 if a callback is needed when this BL request
 *                     completes, 0 for no callback
 * @userdata         : Input private data returned in the callback when
 *                     requested via @flag
 * @cookie           : Cookie if the callback is gen irq status
 * @type             : Type of the submitted bl cmd addresses
 * @cmd_arrary_count : Input number of BL commands to be submitted to CDM
 * @cmd              : Input payload holding the BL cmd array to be
 *                     submitted (flexible tail; sized by
 *                     @cmd_arrary_count)
 */
struct cam_cdm_bl_request {
int flag;
void *userdata;
uint64_t cookie;
enum cam_cdm_bl_cmd_addr_type type;
uint32_t cmd_arrary_count;
struct cam_cdm_bl_cmd cmd[1];
};
/**
* @brief : API to get the CDM capabilities for a camera device type
*
* @identifier : Input pointer to a string which is the device label from dt
* like vfe, ife, jpeg etc, We do not need cell index
* assuming all devices of a single type maps to one SMMU
* client
* @cdm_handles : Input iommu handle memory pointer to update handles
*
* @return 0 on success
*/
int cam_cdm_get_iommu_handle(char *identifier,
struct cam_iommu_handle *cdm_handles);
/**
* @brief : API to acquire a CDM
*
* @data : Input data for the CDM to be acquired
*
* @return 0 on success
*/
int cam_cdm_acquire(struct cam_cdm_acquire_data *data);
/**
* @brief : API to release a previously acquired CDM
*
* @handle : Input handle for the CDM to be released
*
* @return 0 on success
*/
int cam_cdm_release(uint32_t handle);
/**
* @brief : API to submit the base & length (BL's) for acquired CDM
*
* @handle : Input cdm handle to which the BL's need to be submitted.
* @data : Input pointer to the BL's to be submitted
*
* @return 0 on success
*/
int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data);
/**
* @brief : API to stream ON a previously acquired CDM,
* during this we turn on/off clocks/power based on active clients.
*
* @handle : Input handle for the CDM to be streamed ON
*
* @return 0 on success
*/
int cam_cdm_stream_on(uint32_t handle);
/**
* @brief : API to stream OFF a previously acquired CDM,
* during this we turn on/off clocks/power based on active clients.
*
* @handle : Input handle for the CDM to be streamed OFF
*
* @return 0 on success
*/
int cam_cdm_stream_off(uint32_t handle);
/**
* @brief : API to reset previously acquired CDM,
* this can only be performed when the CDM is private.
*
* @handle : Input handle of the CDM to reset
*
* @return 0 on success
*/
int cam_cdm_reset_hw(uint32_t handle);
#endif /* _CAM_CDM_API_H_ */

View file

@ -0,0 +1,201 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/ion.h>
#include <linux/iommu.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include "cam_soc_util.h"
#include "cam_smmu_api.h"
#include "cam_cdm.h"
#include "cam_soc_util.h"
#include "cam_io_util.h"
#define CAM_CDM_OFFSET_FROM_REG(x, y) ((x)->offsets[y].offset)
#define CAM_CDM_ATTR_FROM_REG(x, y) ((x)->offsets[y].attribute)
/*
 * cam_cdm_read_hw_reg() - read a CDM register via the HW offset table.
 * @cdm_hw: CDM hw info whose reg_map[CAM_HW_CDM_BASE_INDEX] is used
 * @reg:    logical register index into cdm->offset_tbl
 * @value:  out param; register contents on success, 0 on failure
 *
 * Validates @reg against the offset table limits and the mapped region
 * before issuing the barriered read.
 *
 * Fix: dropped a duplicated CAM_DBG statement that logged the same
 * base/reg/offset information twice on every read.
 *
 * Return: false on success, true on failure (note inverted convention).
 */
bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
	enum cam_cdm_regs reg, uint32_t *value)
{
	void __iomem *reg_addr;
	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
	void __iomem *base =
		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
	resource_size_t mem_len =
		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;

	CAM_DBG(CAM_CDM, "E: b=%pK blen=%d reg=%x off=%x", (void __iomem *)base,
		(int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl,
		reg)));
	if ((reg > cdm->offset_tbl->offset_max_size) ||
		(reg > cdm->offset_tbl->last_offset)) {
		CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid reg=%d\n", reg);
		goto permission_error;
	} else {
		reg_addr = (base + (CAM_CDM_OFFSET_FROM_REG(
			cdm->offset_tbl, reg)));
		if (reg_addr > (base + mem_len)) {
			CAM_ERR_RATE_LIMIT(CAM_CDM,
				"Invalid mapped region %d", reg);
			goto permission_error;
		}
		*value = cam_io_r_mb(reg_addr);
		CAM_DBG(CAM_CDM, "X b=%pK reg=%x off=%x val=%x",
			(void __iomem *)base, reg,
			(CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)),
			*value);
		return false;
	}
permission_error:
	*value = 0;
	return true;
}
/*
 * cam_cdm_write_hw_reg() - write a CDM register via the HW offset table.
 * @cdm_hw: CDM hw info whose reg_map[CAM_HW_CDM_BASE_INDEX] is used
 * @reg:    logical register index into cdm->offset_tbl
 * @value:  value written with a barriered store
 *
 * NOTE(review): the bounds check permits reg_addr == base + mem_len; a
 * 4-byte write there would land just past the mapped region — confirm
 * whether mem_len is inclusive of the last register.
 *
 * Returns false on success, true on failure (inverted convention,
 * matching cam_cdm_read_hw_reg()).
 */
bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
enum cam_cdm_regs reg, uint32_t value)
{
void __iomem *reg_addr;
struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
void __iomem *base =
cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
resource_size_t mem_len =
cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x val=%x", (void __iomem *)base,
reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value);
if ((reg > cdm->offset_tbl->offset_max_size) ||
(reg > cdm->offset_tbl->last_offset)) {
CAM_ERR_RATE_LIMIT(CAM_CDM, "CDM accessing invalid reg=%d\n",
reg);
goto permission_error;
} else {
reg_addr = (base + CAM_CDM_OFFSET_FROM_REG(
cdm->offset_tbl, reg));
if (reg_addr > (base + mem_len)) {
CAM_ERR_RATE_LIMIT(CAM_CDM,
"Accessing invalid region %d:%d\n",
reg, (CAM_CDM_OFFSET_FROM_REG(
cdm->offset_tbl, reg)));
goto permission_error;
}
cam_io_w_mb(value, reg_addr);
return false;
}
permission_error:
return true;
}
/*
 * cam_cdm_soc_load_dt_private() - parse CDM-private DT properties.
 * @pdev: platform device whose of_node is parsed
 * @ptr:  private DT data filled with "cdm-client-names" and shared flag
 *
 * NOTE(review): when "cdm-client-names" is absent, rc stays -EINVAL and
 * is returned even though the "No cdm client names found" branch looks
 * intentional — confirm the property is mandatory for all callers.
 * NOTE(review): the debug print below uses dt_cdm_client_name[i] before
 * rc is checked; on a read failure the string may be unset — confirm.
 *
 * Return: 0 on success (result of the last string read), negative errno
 * on bad client count or read failure.
 */
int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
struct cam_cdm_private_dt_data *ptr)
{
int i, rc = -EINVAL;
ptr->dt_num_supported_clients = of_property_count_strings(
pdev->dev.of_node,
"cdm-client-names");
CAM_DBG(CAM_CDM, "Num supported cdm_client = %d",
ptr->dt_num_supported_clients);
if (ptr->dt_num_supported_clients >
CAM_PER_CDM_MAX_REGISTERED_CLIENTS) {
CAM_ERR(CAM_CDM, "Invalid count of client names count=%d",
ptr->dt_num_supported_clients);
rc = -EINVAL;
return rc;
}
/* Negative count: property missing or unreadable. */
if (ptr->dt_num_supported_clients < 0) {
CAM_DBG(CAM_CDM, "No cdm client names found");
ptr->dt_num_supported_clients = 0;
ptr->dt_cdm_shared = false;
} else {
ptr->dt_cdm_shared = true;
}
for (i = 0; i < ptr->dt_num_supported_clients; i++) {
rc = of_property_read_string_index(pdev->dev.of_node,
"cdm-client-names", i, &(ptr->dt_cdm_client_name[i]));
CAM_DBG(CAM_CDM, "cdm-client-names[%d] = %s", i,
ptr->dt_cdm_client_name[i]);
if (rc < 0) {
CAM_ERR(CAM_CDM, "Reading cdm-client-names failed");
break;
}
}
return rc;
}
/*
 * cam_hw_cdm_soc_get_dt_properties() - load SOC + CDM-private DT data
 * and resolve the register offset table for this CDM HW.
 * @cdm_hw: HW info; soc_info.soc_private must be NULL and pdev set
 * @table:  of_device_id table whose ->data holds the
 *          cam_cdm_reg_offset_table for the matched compatible
 *
 * On success soc_private holds a freshly allocated
 * cam_cdm_private_dt_data and core_info->offset_tbl/name are filled from
 * the matched id entry. On any failure after allocation, soc_private is
 * freed and reset to NULL.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL otherwise.
 */
int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
const struct of_device_id *table)
{
int rc;
struct cam_hw_soc_info *soc_ptr;
const struct of_device_id *id;
if (!cdm_hw || (cdm_hw->soc_info.soc_private)
|| !(cdm_hw->soc_info.pdev))
return -EINVAL;
soc_ptr = &cdm_hw->soc_info;
rc = cam_soc_util_get_dt_properties(soc_ptr);
if (rc != 0) {
CAM_ERR(CAM_CDM, "Failed to retrieve the CDM dt properties");
} else {
soc_ptr->soc_private = kzalloc(
sizeof(struct cam_cdm_private_dt_data),
GFP_KERNEL);
if (!soc_ptr->soc_private)
return -ENOMEM;
rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev,
soc_ptr->soc_private);
if (rc != 0) {
CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
goto error;
}
id = of_match_node(table, soc_ptr->pdev->dev.of_node);
if ((!id) || !(id->data)) {
CAM_ERR(CAM_CDM, "Failed to retrieve the CDM id table");
goto error;
}
CAM_DBG(CAM_CDM, "CDM Hw Id compatible =%s", id->compatible);
((struct cam_cdm *)cdm_hw->core_info)->offset_tbl =
(struct cam_cdm_reg_offset_table *)id->data;
strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name,
id->compatible,
sizeof(((struct cam_cdm *)cdm_hw->core_info)->name));
}
return rc;
error:
rc = -EINVAL;
kfree(soc_ptr->soc_private);
soc_ptr->soc_private = NULL;
return rc;
}
/*
 * cam_cdm_intf_mgr_soc_get_dt_properties() - read manager-level DT data.
 * @pdev: interface-manager platform device
 * @mgr:  manager state; dt_supported_hw_cdm is filled from "num-hw-cdm"
 *
 * Return: 0 on success, negative errno from of_property_read_u32.
 */
int cam_cdm_intf_mgr_soc_get_dt_properties(
	struct platform_device *pdev, struct cam_cdm_intf_mgr *mgr)
{
	int ret = of_property_read_u32(pdev->dev.of_node,
		"num-hw-cdm", &mgr->dt_supported_hw_cdm);

	CAM_DBG(CAM_CDM, "Number of HW cdm supported =%d",
		mgr->dt_supported_hw_cdm);
	return ret;
}

View file

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CDM_SOC_H_
#define _CAM_CDM_SOC_H_
int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
const struct of_device_id *table);
bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
enum cam_cdm_regs reg, uint32_t *value);
bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
enum cam_cdm_regs reg, uint32_t value);
int cam_cdm_intf_mgr_soc_get_dt_properties(
struct platform_device *pdev,
struct cam_cdm_intf_mgr *mgr);
int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
struct cam_cdm_private_dt_data *ptr);
#endif /* _CAM_CDM_SOC_H_ */

View file

@ -0,0 +1,709 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include "cam_cdm_intf_api.h"
#include "cam_cdm_util.h"
#include "cam_cdm.h"
#include "cam_io_util.h"
#define CAM_CDM_DWORD 4
#define CAM_CDM_SW_CMD_COUNT 2
#define CAM_CMD_LENGTH_MASK 0xFFFF
#define CAM_CDM_COMMAND_OFFSET 24
#define CAM_CDM_REG_OFFSET_MASK 0x00FFFFFF
#define CAM_CDM_DMI_DATA_HI_OFFSET 8
#define CAM_CDM_DMI_DATA_LO_OFFSET 12
/*
 * Per-command header sizes in 32-bit words, indexed by enum
 * cam_cdm_command (payload words follow the header and are not counted).
 */
static unsigned int CDMCmdHeaderSizes[
CAM_CDM_CMD_PRIVATE_BASE + CAM_CDM_SW_CMD_COUNT] = {
0, /* UNUSED*/
3, /* DMI*/
0, /* UNUSED*/
2, /* RegContinuous*/
1, /* RegRandom*/
2, /* BUFFER_INDIRECT*/
2, /* GenerateIRQ*/
3, /* WaitForEvent*/
1, /* ChangeBase*/
1, /* PERF_CONTROL*/
3, /* DMI32*/
3, /* DMI64*/
};
/**
 * struct cdm_regrandom_cmd - Definition for CDM random register command.
 * @count: Number of register writes
 * @reserved: reserved bits
 * @cmd: Command ID (CDMCmd)
 */
struct cdm_regrandom_cmd {
unsigned int count : 16;
unsigned int reserved : 8;
unsigned int cmd : 8;
} __attribute__((__packed__));
/**
 * struct cdm_regcontinuous_cmd - Definition for a CDM register range command.
 * @count: Number of register writes
 * @reserved0: reserved bits
 * @cmd: Command ID (CDMCmd)
 * @offset: Start address of the range of registers
 * @reserved1: reserved bits
 */
struct cdm_regcontinuous_cmd {
unsigned int count : 16;
unsigned int reserved0 : 8;
unsigned int cmd : 8;
unsigned int offset : 24;
unsigned int reserved1 : 8;
} __attribute__((__packed__));
/**
 * struct cdm_dmi_cmd - Definition for a CDM DMI command.
 * @length: Number of bytes in LUT - 1
 * @reserved: reserved bits
 * @cmd: Command ID (CDMCmd)
 * @addr: Address of the LUT in memory
 * @DMIAddr: Address of the target DMI config register
 * @DMISel: DMI identifier
 */
struct cdm_dmi_cmd {
unsigned int length : 16;
unsigned int reserved : 8;
unsigned int cmd : 8;
unsigned int addr;
unsigned int DMIAddr : 24;
unsigned int DMISel : 8;
} __attribute__((__packed__));
/**
 * struct cdm_indirect_cmd - Definition for a CDM indirect buffer command.
 * @length: Number of bytes in buffer - 1
 * @reserved: reserved bits
 * @cmd: Command ID (CDMCmd)
 * @addr: Device address of the indirect buffer
 */
struct cdm_indirect_cmd {
unsigned int length : 16;
unsigned int reserved : 8;
unsigned int cmd : 8;
unsigned int addr;
} __attribute__((__packed__));
/**
 * struct cdm_changebase_cmd - Definition for CDM base address change command.
 * @base: Base address to be changed to
 * @cmd: Command ID (CDMCmd)
 */
struct cdm_changebase_cmd {
unsigned int base : 24;
unsigned int cmd : 8;
} __attribute__((__packed__));
/**
 * struct cdm_wait_event_cmd - Definition for a CDM WAIT_EVENT command.
 * (Previous comment mislabelled this struct as the Gen IRQ command.)
 * @mask: Mask for the events
 * @id: ID to read back for debug
 * @iw_reserved: reserved bits
 * @iw: iw AHB write bit
 * @cmd: Command ID (CDMCmd)
 * @offset: Offset to where data is written
 * @offset_reserved: reserved bits
 * @data: data returned in IRQ_USR_DATA
 */
struct cdm_wait_event_cmd {
unsigned int mask : 8;
unsigned int id : 8;
unsigned int iw_reserved : 7;
unsigned int iw : 1;
unsigned int cmd : 8;
unsigned int offset : 24;
unsigned int offset_reserved : 8;
unsigned int data;
} __attribute__((__packed__));
/**
 * struct cdm_genirq_cmd - Definition for a CDM GEN_IRQ command.
 * (Previous comment mislabelled this struct as the Wait event command.)
 * @reserved: reserved bits
 * @cmd: Command ID (CDMCmd)
 * @userdata: userdata returned in IRQ_USR_DATA
 */
struct cdm_genirq_cmd {
unsigned int reserved : 24;
unsigned int cmd : 8;
unsigned int userdata;
} __attribute__((__packed__));
/**
 * struct cdm_perf_ctrl_cmd - Definition for CDM perf control command.
 * @perf: perf command
 * @reserved: reserved bits
 * @cmd: Command ID (CDMCmd)
 */
struct cdm_perf_ctrl_cmd {
unsigned int perf : 2;
unsigned int reserved : 22;
unsigned int cmd : 8;
} __attribute__((__packed__));
/*
 * cdm_get_cmd_header_size() - header size (in 32-bit words) for a CDM
 * command id.
 * @command: command id (enum cam_cdm_command)
 *
 * Fix: guard against out-of-range ids so a corrupted command stream
 * cannot index past the CDMCmdHeaderSizes table (out-of-bounds read);
 * unknown commands now report a zero-sized header.
 */
uint32_t cdm_get_cmd_header_size(unsigned int command)
{
	if (command >= ARRAY_SIZE(CDMCmdHeaderSizes))
		return 0;
	return CDMCmdHeaderSizes[command];
}
/* Required sizes below are in 32-bit words (header + payload). */
uint32_t cdm_required_size_reg_continuous(uint32_t numVals)
{
return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT) + numVals;
}
/* Each register write in a REG_RANDOM payload is an (offset, value) pair. */
uint32_t cdm_required_size_reg_random(uint32_t numRegVals)
{
return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM) +
(2 * numRegVals);
}
uint32_t cdm_required_size_dmi(void)
{
return cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
}
uint32_t cdm_required_size_genirq(void)
{
return cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ);
}
uint32_t cdm_required_size_indirect(void)
{
return cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
}
uint32_t cdm_required_size_changebase(void)
{
return cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
}
/* Byte offset of the LUT address field inside a DMI command header. */
uint32_t cdm_offsetof_dmi_addr(void)
{
return offsetof(struct cdm_dmi_cmd, addr);
}
/* Byte offset of the buffer address field inside an indirect command. */
uint32_t cdm_offsetof_indirect_addr(void)
{
return offsetof(struct cdm_indirect_cmd, addr);
}
/*
 * cdm_write_regcontinuous() - emit a REG_CONT command: write @numVals
 * consecutive values starting at register offset @reg.
 * @pCmdBuffer: destination command stream (32-bit words)
 * @reg:        start register offset
 * @numVals:    number of payload words
 * @pVals:      payload values copied after the header
 *
 * Return: pointer just past the emitted command.
 */
uint32_t *cdm_write_regcontinuous(uint32_t *pCmdBuffer, uint32_t reg,
	uint32_t numVals, uint32_t *pVals)
{
	struct cdm_regcontinuous_cmd *hdr =
		(struct cdm_regcontinuous_cmd *)pCmdBuffer;
	uint32_t *payload;
	uint32_t idx;

	hdr->count = numVals;
	hdr->cmd = CAM_CDM_CMD_REG_CONT;
	hdr->reserved0 = 0;
	hdr->reserved1 = 0;
	hdr->offset = reg;

	payload = pCmdBuffer + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
	for (idx = 0; idx < numVals; idx++)
		payload[idx] = pVals[idx];

	return payload + numVals;
}
/*
 * cdm_write_regrandom() - emit a REG_RANDOM command: @numRegVals
 * (offset, value) pairs copied after the header.
 * @pCmdBuffer: destination command stream (32-bit words)
 * @numRegVals: number of register/value pairs
 * @pRegVals:   pairs laid out as offset0, value0, offset1, value1, ...
 *
 * Return: pointer just past the emitted command.
 */
uint32_t *cdm_write_regrandom(uint32_t *pCmdBuffer, uint32_t numRegVals,
	uint32_t *pRegVals)
{
	struct cdm_regrandom_cmd *hdr =
		(struct cdm_regrandom_cmd *)pCmdBuffer;
	uint32_t *out;
	uint32_t i;

	hdr->count = numRegVals;
	hdr->cmd = CAM_CDM_CMD_REG_RANDOM;
	hdr->reserved = 0;

	out = pCmdBuffer + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
	for (i = 0; i < 2 * numRegVals; i++)
		out[i] = pRegVals[i];

	return out + 2 * numRegVals;
}
/*
 * cdm_write_dmi() - emit a DMI command header into the command stream.
 * @pCmdBuffer:    destination command stream (32-bit words)
 * @dmiCmd:        DMI command id variant
 * @DMIAddr:       target DMI config register address
 * @DMISel:        DMI identifier
 * @dmiBufferAddr: address of the LUT in memory
 * @length:        LUT size in bytes (header stores length - 1)
 *
 * Fix: zero the header's reserved bits so stale command-buffer contents
 * are not submitted to HW (cdm_write_regcontinuous/regrandom already
 * clear their reserved fields).
 *
 * Return: pointer just past the emitted header.
 */
uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd,
	uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr,
	uint32_t length)
{
	struct cdm_dmi_cmd *pHeader = (struct cdm_dmi_cmd *)pCmdBuffer;

	pHeader->cmd = dmiCmd;
	pHeader->addr = dmiBufferAddr;
	pHeader->length = length - 1;
	pHeader->DMIAddr = DMIAddr;
	pHeader->DMISel = DMISel;
	pHeader->reserved = 0;
	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
	return pCmdBuffer;
}
/*
 * cdm_write_indirect() - emit a BUFF_INDIRECT command header.
 * @pCmdBuffer:      destination command stream (32-bit words)
 * @indirectBufAddr: device address of the indirect buffer
 * @length:          buffer size in bytes (header stores length - 1)
 *
 * Fix: zero the header's reserved bits so stale command-buffer contents
 * are not submitted to HW (matches the other cdm_write_* helpers).
 *
 * Return: pointer just past the emitted header.
 */
uint32_t *cdm_write_indirect(uint32_t *pCmdBuffer, uint32_t indirectBufAddr,
	uint32_t length)
{
	struct cdm_indirect_cmd *pHeader =
		(struct cdm_indirect_cmd *)pCmdBuffer;

	pHeader->cmd = CAM_CDM_CMD_BUFF_INDIRECT;
	pHeader->addr = indirectBufAddr;
	pHeader->length = length - 1;
	pHeader->reserved = 0;
	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
	return pCmdBuffer;
}
/*
 * cdm_write_changebase() - emit a CHANGE_BASE command.
 * @pCmdBuffer: destination command stream (32-bit words)
 * @base:       base address to switch the CDM to
 *
 * Return: pointer just past the emitted command.
 */
uint32_t *cdm_write_changebase(uint32_t *pCmdBuffer, uint32_t base)
{
	struct cdm_changebase_cmd *hdr =
		(struct cdm_changebase_cmd *)pCmdBuffer;

	hdr->cmd = CAM_CDM_CMD_CHANGE_BASE;
	hdr->base = base;

	return pCmdBuffer + cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
}
/*
 * cdm_write_genirq() - emit a GEN_IRQ command.
 * @pCmdBuffer: destination command stream (32-bit words)
 * @userdata:   value returned in IRQ_USR_DATA
 *
 * Fix: zero the header's reserved bits so stale command-buffer contents
 * are not submitted to HW (matches the other cdm_write_* helpers).
 */
void cdm_write_genirq(uint32_t *pCmdBuffer, uint32_t userdata)
{
	struct cdm_genirq_cmd *pHeader = (struct cdm_genirq_cmd *)pCmdBuffer;

	pHeader->cmd = CAM_CDM_CMD_GEN_IRQ;
	pHeader->userdata = userdata;
	pHeader->reserved = 0;
}
/*
 * Util ops vtable for CDM HW 1.70. Initializers are positional, so this
 * order must match the member order of struct cam_cdm_utils_ops.
 */
struct cam_cdm_utils_ops CDM170_ops = {
cdm_get_cmd_header_size,
cdm_required_size_reg_continuous,
cdm_required_size_reg_random,
cdm_required_size_dmi,
cdm_required_size_genirq,
cdm_required_size_indirect,
cdm_required_size_changebase,
cdm_offsetof_dmi_addr,
cdm_offsetof_indirect_addr,
cdm_write_regcontinuous,
cdm_write_regrandom,
cdm_write_dmi,
cdm_write_indirect,
cdm_write_changebase,
cdm_write_genirq,
};
/*
 * cam_cdm_get_ioremap_from_base() - look up the ioremapped kernel base
 * for a camera HW base address.
 * @hw_base:         camera HW base to resolve
 * @base_array_size: number of valid entries in @base_table
 * @base_table:      array of soc reg maps (entries may be NULL)
 * @device_base:     out param; mem_base of the matching entry
 *
 * Return: 0 when a matching entry is found, -EINVAL otherwise.
 */
int cam_cdm_get_ioremap_from_base(uint32_t hw_base,
	uint32_t base_array_size,
	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
	void __iomem **device_base)
{
	int i;

	for (i = 0; i < base_array_size; i++) {
		struct cam_soc_reg_map *entry = base_table[i];

		if (!entry)
			continue;
		CAM_DBG(CAM_CDM, "In loop %d ioremap for %x addr=%x",
			i, entry->mem_cam_base, hw_base);
		if (entry->mem_cam_base == hw_base) {
			*device_base = entry->mem_base;
			return 0;
		}
	}
	return -EINVAL;
}
/*
 * cam_cdm_util_reg_cont_write() - SW-execute a REG_CONT command: copy
 * the payload words to consecutive registers starting at the header's
 * offset.
 * @base_addr:    ioremapped device base
 * @cmd_buf:      command stream positioned at the REG_CONT header
 * @cmd_buf_size: remaining stream size in bytes
 * @used_bytes:   out param; bytes consumed (payload + 4 * header words)
 *
 * Validates count (non-zero, <= 0x10000) and that header + payload fit
 * in the remaining buffer.
 *
 * Return: 0 on success, -EINVAL on bad args or malformed command.
 */
static int cam_cdm_util_reg_cont_write(void __iomem *base_addr,
uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
{
int ret = 0;
uint32_t *data;
struct cdm_regcontinuous_cmd *reg_cont;
if ((cmd_buf_size < cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
(!base_addr)) {
CAM_ERR(CAM_CDM, "invalid base addr and data length %d %pK",
cmd_buf_size, base_addr);
return -EINVAL;
}
reg_cont = (struct cdm_regcontinuous_cmd *)cmd_buf;
if ((!reg_cont->count) || (reg_cont->count > 0x10000) ||
(((reg_cont->count * sizeof(uint32_t)) +
cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
cmd_buf_size)) {
CAM_ERR(CAM_CDM, "buffer size %d is not sufficient for count%d",
cmd_buf_size, reg_cont->count);
return -EINVAL;
}
data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
cam_io_memcpy(base_addr + reg_cont->offset, data,
reg_cont->count * sizeof(uint32_t));
/* used_bytes is in bytes: payload bytes + header words * 4. */
*used_bytes = (reg_cont->count * sizeof(uint32_t)) +
(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT));
return ret;
}
/*
 * cam_cdm_util_reg_random_write() - SW-execute a REG_RANDOM command:
 * write each (offset, value) pair from the payload to the device.
 * @base_addr:    ioremapped device base
 * @cmd_buf:      command stream positioned at the REG_RANDOM header
 * @cmd_buf_size: remaining stream size in bytes
 * @used_bytes:   out param; bytes consumed (pairs + 4 * header words)
 *
 * Validates count (non-zero, <= 0x10000) and that header + pair payload
 * fit in the remaining buffer.
 *
 * Return: 0 on success, -EINVAL on bad args or malformed command.
 */
static int cam_cdm_util_reg_random_write(void __iomem *base_addr,
uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
{
uint32_t i;
struct cdm_regrandom_cmd *reg_random;
uint32_t *data;
if (!base_addr) {
CAM_ERR(CAM_CDM, "invalid base address");
return -EINVAL;
}
reg_random = (struct cdm_regrandom_cmd *) cmd_buf;
if ((!reg_random->count) || (reg_random->count > 0x10000) ||
(((reg_random->count * (sizeof(uint32_t) * 2)) +
cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
cmd_buf_size)) {
CAM_ERR(CAM_CDM, "invalid reg_count %d cmd_buf_size %d",
reg_random->count, cmd_buf_size);
return -EINVAL;
}
data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
/* Payload layout: offset word followed by value word, per pair. */
for (i = 0; i < reg_random->count; i++) {
CAM_DBG(CAM_CDM, "reg random: offset %pK, value 0x%x",
((void __iomem *)(base_addr + data[0])),
data[1]);
cam_io_w(data[1], base_addr + data[0]);
data += 2;
}
*used_bytes = ((reg_random->count * (sizeof(uint32_t) * 2)) +
(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));
return 0;
}
/*
 * cam_cdm_util_swd_dmi_write() - SW-execute a DMI command: push LUT
 * payload words into the DMI data registers.
 * @cdm_cmd_type: CAM_CDM_CMD_SWD_DMI_32 or CAM_CDM_CMD_SWD_DMI_64
 * @base_addr:    ioremapped device base (caller validates non-NULL)
 * @cmd_buf:      command stream positioned at the DMI header
 * @cmd_buf_size: remaining stream size in bytes
 * @used_bytes:   out param; bytes consumed (header * 4 + length + 1)
 *
 * The header's length field holds (payload bytes - 1); 64-bit DMI writes
 * consume the payload in LO/HI word pairs, 32-bit writes one word each.
 *
 * Return: 0 on success, -EINVAL if the payload exceeds the buffer.
 */
static int cam_cdm_util_swd_dmi_write(uint32_t cdm_cmd_type,
void __iomem *base_addr, uint32_t *cmd_buf, uint32_t cmd_buf_size,
uint32_t *used_bytes)
{
uint32_t i;
struct cdm_dmi_cmd *swd_dmi;
uint32_t *data;
swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;
if (cmd_buf_size < (cdm_required_size_dmi() + swd_dmi->length + 1)) {
CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
swd_dmi->length + 1);
return -EINVAL;
}
data = cmd_buf + cdm_required_size_dmi();
if (cdm_cmd_type == CAM_CDM_CMD_SWD_DMI_64) {
for (i = 0; i < (swd_dmi->length + 1)/8; i++) {
cam_io_w_mb(data[0], base_addr +
swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
cam_io_w_mb(data[1], base_addr +
swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET);
data += 2;
}
} else {
for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
cam_io_w_mb(data[0], base_addr +
swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
data += 1;
}
}
*used_bytes = (4 * cdm_required_size_dmi()) + swd_dmi->length + 1;
return 0;
}
/*
 * cam_cdm_util_cmd_buf_write() - SW-execute a CDM command buffer: walk
 * the stream and dispatch each command to the matching writer helper.
 * @current_device_base: in/out ioremapped base; updated by CHANGE_BASE
 * @cmd_buf:             command stream (32-bit words)
 * @cmd_buf_size:        stream size in bytes
 * @base_table:          soc reg maps used to resolve CHANGE_BASE
 * @base_array_size:     number of entries in @base_table
 * @bl_tag:              BL tag (currently unused by this function)
 *
 * Fix: removed the unused local total_cmd_buf_size (assigned, never
 * read).
 *
 * Return: 0 on success, negative errno on the first failing command.
 */
int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
	uint32_t *cmd_buf, uint32_t cmd_buf_size,
	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
	uint32_t base_array_size, uint8_t bl_tag)
{
	int ret = 0;
	uint32_t cdm_cmd_type = 0;
	uint32_t used_bytes = 0;

	while (cmd_buf_size > 0) {
		CAM_DBG(CAM_CDM, "cmd data=%x", *cmd_buf);
		/* Command id lives in the top byte of the first word. */
		cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET);
		switch (cdm_cmd_type) {
		case CAM_CDM_CMD_REG_CONT: {
			ret = cam_cdm_util_reg_cont_write(*current_device_base,
				cmd_buf, cmd_buf_size, &used_bytes);
			if (ret)
				break;
			if (used_bytes > 0) {
				cmd_buf_size -= used_bytes;
				cmd_buf += used_bytes/4;
			}
			}
			break;
		case CAM_CDM_CMD_REG_RANDOM: {
			ret = cam_cdm_util_reg_random_write(
				*current_device_base, cmd_buf, cmd_buf_size,
				&used_bytes);
			if (ret)
				break;
			if (used_bytes > 0) {
				cmd_buf_size -= used_bytes;
				cmd_buf += used_bytes / 4;
			}
			}
			break;
		case CAM_CDM_CMD_SWD_DMI_32:
		case CAM_CDM_CMD_SWD_DMI_64: {
			if (*current_device_base == 0) {
				CAM_ERR(CAM_CDM,
					"Got SWI DMI cmd =%d for invalid hw",
					cdm_cmd_type);
				ret = -EINVAL;
				break;
			}
			ret = cam_cdm_util_swd_dmi_write(cdm_cmd_type,
				*current_device_base, cmd_buf, cmd_buf_size,
				&used_bytes);
			if (ret)
				break;
			if (used_bytes > 0) {
				cmd_buf_size -= used_bytes;
				cmd_buf += used_bytes / 4;
			}
			}
			break;
		case CAM_CDM_CMD_CHANGE_BASE: {
			struct cdm_changebase_cmd *change_base_cmd =
				(struct cdm_changebase_cmd *)cmd_buf;
			ret = cam_cdm_get_ioremap_from_base(
				change_base_cmd->base, base_array_size,
				base_table, current_device_base);
			if (ret != 0) {
				CAM_ERR(CAM_CDM,
					"Get ioremap change base failed %x",
					change_base_cmd->base);
				break;
			}
			CAM_DBG(CAM_CDM, "Got ioremap for %x addr=%pK",
				change_base_cmd->base,
				current_device_base);
			cmd_buf_size -= (4 *
				cdm_required_size_changebase());
			cmd_buf += cdm_required_size_changebase();
			}
			break;
		default:
			CAM_ERR(CAM_CDM, "unsupported cdm_cmd_type type 0%x",
				cdm_cmd_type);
			ret = -EINVAL;
			break;
		}
		if (ret < 0)
			break;
	}
	return ret;
}
static long cam_cdm_util_dump_dmi_cmd(uint32_t *cmd_buf_addr)
{
long ret = 0;
ret += CDMCmdHeaderSizes[CAM_CDM_CMD_DMI];
CAM_INFO(CAM_CDM, "DMI");
return ret;
}
static long cam_cdm_util_dump_buff_indirect(uint32_t *cmd_buf_addr)
{
long ret = 0;
ret += CDMCmdHeaderSizes[CAM_CDM_CMD_BUFF_INDIRECT];
CAM_INFO(CAM_CDM, "Buff Indirect");
return ret;
}
static long cam_cdm_util_dump_reg_cont_cmd(uint32_t *cmd_buf_addr)
{
long ret = 0;
struct cdm_regcontinuous_cmd *p_regcont_cmd;
uint32_t *temp_ptr = cmd_buf_addr;
int i = 0;
p_regcont_cmd = (struct cdm_regcontinuous_cmd *)temp_ptr;
temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];
ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];
CAM_INFO(CAM_CDM, "REG_CONT: COUNT: %u OFFSET: 0x%X",
p_regcont_cmd->count, p_regcont_cmd->offset);
for (i = 0; i < p_regcont_cmd->count; i++) {
CAM_INFO(CAM_CDM, "DATA_%d: 0x%X", i,
*temp_ptr);
temp_ptr++;
ret++;
}
return ret;
}
static long cam_cdm_util_dump_reg_random_cmd(uint32_t *cmd_buf_addr)
{
struct cdm_regrandom_cmd *p_regrand_cmd;
uint32_t *temp_ptr = cmd_buf_addr;
long ret = 0;
int i = 0;
p_regrand_cmd = (struct cdm_regrandom_cmd *)temp_ptr;
temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];
ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];
CAM_INFO(CAM_CDM, "REG_RAND: COUNT: %u",
p_regrand_cmd->count);
for (i = 0; i < p_regrand_cmd->count; i++) {
CAM_INFO(CAM_CDM, "OFFSET_%d: 0x%X DATA_%d: 0x%X",
i, *temp_ptr & CAM_CDM_REG_OFFSET_MASK, i,
*(temp_ptr + 1));
temp_ptr += 2;
ret += 2;
}
return ret;
}
static long cam_cdm_util_dump_gen_irq_cmd(uint32_t *cmd_buf_addr)
{
long ret = 0;
ret += CDMCmdHeaderSizes[CAM_CDM_CMD_GEN_IRQ];
CAM_INFO(CAM_CDM, "GEN_IRQ");
return ret;
}
static long cam_cdm_util_dump_wait_event_cmd(uint32_t *cmd_buf_addr)
{
long ret = 0;
ret += CDMCmdHeaderSizes[CAM_CDM_CMD_WAIT_EVENT];
CAM_INFO(CAM_CDM, "WAIT_EVENT");
return ret;
}
/*
 * Dump a change-base command: log the new base field from its header.
 * Returns the fixed header size (in DWORDs) so the caller can advance
 * past it.
 */
static long cam_cdm_util_dump_change_base_cmd(uint32_t *cmd_buf_addr)
{
	struct cdm_changebase_cmd *base_cmd =
		(struct cdm_changebase_cmd *)cmd_buf_addr;

	CAM_INFO(CAM_CDM, "CHANGE_BASE: 0x%X",
		base_cmd->base);

	return CDMCmdHeaderSizes[CAM_CDM_CMD_CHANGE_BASE];
}
/* Log a perf-ctrl command marker and return its fixed header size
 * (in DWORDs) so the caller can advance past it.
 */
static long cam_cdm_util_dump_perf_ctrl_cmd(uint32_t *cmd_buf_addr)
{
	CAM_INFO(CAM_CDM, "PERF_CTRL");

	return CDMCmdHeaderSizes[CAM_CDM_CMD_PERF_CTRL];
}
/*
 * Walk a CDM command buffer from @cmd_buf_start to @cmd_buf_end
 * (inclusive), decode the opcode of each command from the top bits of
 * its first DWORD, and log it. Each per-opcode dumper returns how many
 * DWORDs it consumed; unknown opcodes are logged and skipped one DWORD
 * at a time.
 */
void cam_cdm_util_dump_cmd_buf(
	uint32_t *cmd_buf_start, uint32_t *cmd_buf_end)
{
	uint32_t *cur = cmd_buf_start;
	uint32_t opcode;

	if (!cmd_buf_start || !cmd_buf_end) {
		CAM_INFO(CAM_CDM, "Invalid args");
		return;
	}

	do {
		opcode = (*cur) >> CAM_CDM_COMMAND_OFFSET;
		switch (opcode) {
		case CAM_CDM_CMD_DMI:
		case CAM_CDM_CMD_DMI_32:
		case CAM_CDM_CMD_DMI_64:
			cur += cam_cdm_util_dump_dmi_cmd(cur);
			break;
		case CAM_CDM_CMD_REG_CONT:
			cur += cam_cdm_util_dump_reg_cont_cmd(cur);
			break;
		case CAM_CDM_CMD_REG_RANDOM:
			cur += cam_cdm_util_dump_reg_random_cmd(cur);
			break;
		case CAM_CDM_CMD_BUFF_INDIRECT:
			cur += cam_cdm_util_dump_buff_indirect(cur);
			break;
		case CAM_CDM_CMD_GEN_IRQ:
			cur += cam_cdm_util_dump_gen_irq_cmd(cur);
			break;
		case CAM_CDM_CMD_WAIT_EVENT:
			cur += cam_cdm_util_dump_wait_event_cmd(cur);
			break;
		case CAM_CDM_CMD_CHANGE_BASE:
			cur += cam_cdm_util_dump_change_base_cmd(cur);
			break;
		case CAM_CDM_CMD_PERF_CTRL:
			cur += cam_cdm_util_dump_perf_ctrl_cmd(cur);
			break;
		default:
			CAM_INFO(CAM_CDM, "Invalid CMD: 0x%x buf 0x%x",
				opcode, *cur);
			cur++;
			break;
		}
	} while (cur <= cmd_buf_end);
}

View file

@ -0,0 +1,161 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CDM_UTIL_H_
#define _CAM_CDM_UTIL_H_
/*
 * CDM command opcodes, as encoded in the top bits of a command DWORD
 * (see cam_cdm_util_dump_cmd_buf, which shifts by CAM_CDM_COMMAND_OFFSET
 * before comparing against these values).
 */
enum cam_cdm_command {
	CAM_CDM_CMD_UNUSED = 0x0,
	CAM_CDM_CMD_DMI = 0x1,
	CAM_CDM_CMD_NOT_DEFINED = 0x2,
	CAM_CDM_CMD_REG_CONT = 0x3,
	CAM_CDM_CMD_REG_RANDOM = 0x4,
	CAM_CDM_CMD_BUFF_INDIRECT = 0x5,
	CAM_CDM_CMD_GEN_IRQ = 0x6,
	CAM_CDM_CMD_WAIT_EVENT = 0x7,
	CAM_CDM_CMD_CHANGE_BASE = 0x8,
	CAM_CDM_CMD_PERF_CTRL = 0x9,
	CAM_CDM_CMD_DMI_32 = 0xa,
	CAM_CDM_CMD_DMI_64 = 0xb,
	/* Opcodes at/above PRIVATE_BASE (e.g. SWD_*) appear to be
	 * software-defined extensions, not HW opcodes — TODO confirm
	 * against the CDM hardware spec. */
	CAM_CDM_CMD_PRIVATE_BASE = 0xc,
	CAM_CDM_CMD_SWD_DMI_32 = (CAM_CDM_CMD_PRIVATE_BASE + 0x64),
	CAM_CDM_CMD_SWD_DMI_64 = (CAM_CDM_CMD_PRIVATE_BASE + 0x65),
	CAM_CDM_CMD_PRIVATE_BASE_MAX = 0x7F
};
/**
* struct cam_cdm_utils_ops - Camera CDM util ops
*
* @cdm_get_cmd_header_size: Returns the size of the given command header
* in DWORDs.
* @command Command ID
* @return Size of the command in DWORDs
*
* @cdm_required_size_reg_continuous: Calculates the size of a reg-continuous
* command in dwords.
* @numVals Number of continuous values
* @return Size in dwords
*
* @cdm_required_size_reg_random: Calculates the size of a reg-random command
* in dwords.
* @numRegVals Number of register/value pairs
* @return Size in dwords
*
* @cdm_required_size_dmi: Calculates the size of a DMI command in dwords.
* @return Size in dwords
*
* @cdm_required_size_genirq: Calculates size of a Genirq command in dwords.
* @return Size in dwords
*
* @cdm_required_size_indirect: Calculates the size of an indirect command
* in dwords.
* @return Size in dwords
*
* @cdm_required_size_changebase: Calculates the size of a change-base command
* in dwords.
* @return Size in dwords
*
* @cdm_offsetof_dmi_addr: Returns the offset of address field in the DMI
* command header.
* @return Offset of addr field
*
* @cdm_offsetof_indirect_addr: Returns the offset of address field in the
* indirect command header.
* @return Offset of addr field
*
* @cdm_write_regcontinuous: Writes a command into the command buffer.
* @pCmdBuffer: Pointer to command buffer
* @reg: Beginning of the register address range where
* values will be written.
* @numVals: Number of values (registers) that will be written
* @pVals : An array of values that will be written
* @return Pointer in command buffer pointing past the written commands
*
* @cdm_write_regrandom: Writes a command into the command buffer in
* register/value pairs.
* @pCmdBuffer: Pointer to command buffer
* @numRegVals: Number of register/value pairs that will be written
* @pRegVals: An array of register/value pairs that will be written
* The even indices are registers and the odd indices
* arevalues, e.g., {reg1, val1, reg2, val2, ...}.
* @return Pointer in command buffer pointing past the written commands
*
* @cdm_write_dmi: Writes a DMI command into the command bufferM.
* @pCmdBuffer: Pointer to command buffer
* @dmiCmd: DMI command
* @DMIAddr: Address of the DMI
* @DMISel: Selected bank that the DMI will write to
* @length: Size of data in bytes
* @return Pointer in command buffer pointing past the written commands
*
* @cdm_write_indirect: Writes a indirect command into the command buffer.
* @pCmdBuffer: Pointer to command buffer
* @indirectBufferAddr: Device address of the indirect cmd buffer.
* @length: Size of data in bytes
* @return Pointer in command buffer pointing past the written commands
*
* @cdm_write_changebase: Writes a changing CDM (address) base command into
* the command buffer.
* @pCmdBuffer: Pointer to command buffer
* @base: New base (device) address
* @return Pointer in command buffer pointing past the written commands
*
* @cdm_write_genirq: Writes a gen irq command into the command buffer.
* @pCmdBuffer: Pointer to command buffer
* @userdata: userdata or cookie return by hardware during irq.
*/
struct cam_cdm_utils_ops {
	/* Size/offset queries; per the doc block above, sizes are in
	 * DWORDs. */
	uint32_t (*cdm_get_cmd_header_size)(unsigned int command);
	uint32_t (*cdm_required_size_reg_continuous)(uint32_t numVals);
	uint32_t (*cdm_required_size_reg_random)(uint32_t numRegVals);
	uint32_t (*cdm_required_size_dmi)(void);
	uint32_t (*cdm_required_size_genirq)(void);
	uint32_t (*cdm_required_size_indirect)(void);
	uint32_t (*cdm_required_size_changebase)(void);
	uint32_t (*cdm_offsetof_dmi_addr)(void);
	uint32_t (*cdm_offsetof_indirect_addr)(void);
	/* Command emitters; per the doc block above, each returns a
	 * pointer just past what it wrote into pCmdBuffer. */
	uint32_t* (*cdm_write_regcontinuous)(
		uint32_t *pCmdBuffer,
		uint32_t reg,
		uint32_t numVals,
		uint32_t *pVals);
	uint32_t *(*cdm_write_regrandom)(
		uint32_t *pCmdBuffer,
		uint32_t numRegVals,
		uint32_t *pRegVals);
	uint32_t *(*cdm_write_dmi)(
		uint32_t *pCmdBuffer,
		uint8_t dmiCmd,
		uint32_t DMIAddr,
		uint8_t DMISel,
		uint32_t dmiBufferAddr,
		uint32_t length);
	uint32_t *(*cdm_write_indirect)(
		uint32_t *pCmdBuffer,
		uint32_t indirectBufferAddr,
		uint32_t length);
	uint32_t *(*cdm_write_changebase)(
		uint32_t *pCmdBuffer,
		uint32_t base);
	void (*cdm_write_genirq)(
		uint32_t *pCmdBuffer,
		uint32_t userdata);
};
/**
* cam_cdm_util_log_cmd_bufs()
*
* @brief: Util function to log cdm command buffers
*
* @cmd_buffer_start: Pointer to start of cmd buffer
* @cmd_buffer_end: Pointer to end of cmd buffer
*
*/
void cam_cdm_util_dump_cmd_buf(
uint32_t *cmd_buffer_start, uint32_t *cmd_buffer_end);
#endif /* _CAM_CDM_UTIL_H_ */

View file

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CDM_VIRTUAL_H_
#define _CAM_CDM_VIRTUAL_H_
#include "cam_cdm_intf_api.h"
/* Platform-driver entry points for the virtual (software) CDM device. */
int cam_virtual_cdm_probe(struct platform_device *pdev);
int cam_virtual_cdm_remove(struct platform_device *pdev);
/* Process @cmd_buf (size in bytes) against the register base table;
 * @bl_tag identifies the batch for completion tracking.
 * NOTE(review): exact command semantics live in the implementation —
 * confirm there before relying on details. */
int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
	uint32_t *cmd_buf, uint32_t cmd_buf_size,
	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
	uint32_t base_array_size, uint8_t bl_tag);
#endif /* _CAM_CDM_VIRTUAL_H_ */

View file

@ -0,0 +1,376 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/ion.h>
#include <linux/iommu.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include "cam_soc_util.h"
#include "cam_smmu_api.h"
#include "cam_cdm_intf_api.h"
#include "cam_cdm.h"
#include "cam_cdm_util.h"
#include "cam_cdm_virtual.h"
#include "cam_cdm_core_common.h"
#include "cam_cdm_soc.h"
#include "cam_io_util.h"
#define CAM_CDM_VIRTUAL_NAME "qcom,cam_virtual_cdm"
/*
 * Deferred-work handler for the virtual CDM. Bit 0x2 of irq_status marks
 * an inline/gen IRQ: look up the BL request node by tag, notify the
 * client on success-type nodes, then free the node. Bit 0x1 marks reset
 * done: signal the reset completion. The payload is freed at the end.
 */
static void cam_virtual_cdm_work(struct work_struct *work)
{
	struct cam_cdm_work_payload *payload;
	struct cam_hw_info *cdm_hw;
	struct cam_cdm *core;

	payload = container_of(work, struct cam_cdm_work_payload, work);
	if (!payload)
		return;

	cdm_hw = payload->hw;
	core = (struct cam_cdm *)cdm_hw->core_info;

	if (payload->irq_status & 0x2) {
		struct cam_cdm_bl_cb_request_entry *node;

		CAM_DBG(CAM_CDM, "CDM HW Gen/inline IRQ with data=%x",
			payload->irq_data);
		mutex_lock(&cdm_hw->hw_mutex);
		node = cam_cdm_find_request_by_bl_tag(
			payload->irq_data,
			&core->bl_request_list);
		if (!node) {
			CAM_ERR(CAM_CDM, "Invalid node for inline irq");
		} else {
			if (node->request_type == CAM_HW_CDM_BL_CB_CLIENT)
				cam_cdm_notify_clients(cdm_hw,
					CAM_CDM_CB_STATUS_BL_SUCCESS,
					(void *)node);
			else if (node->request_type ==
				CAM_HW_CDM_BL_CB_INTERNAL)
				CAM_ERR(CAM_CDM, "Invalid node=%pK %d",
					node, node->request_type);
			list_del_init(&node->entry);
			kfree(node);
		}
		mutex_unlock(&cdm_hw->hw_mutex);
	}

	if (payload->irq_status & 0x1) {
		CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
		complete(&core->reset_complete);
	}

	kfree(payload);
}
/*
 * Submit an array of BL (buffer-list) commands to the virtual CDM.
 *
 * For each entry, resolve its CPU address (mem handle or kernel iova),
 * sanity-check length/offset, and replay the command buffer through
 * cam_cdm_util_cmd_buf_write(). If the request asks for a completion
 * callback (req->data->flag), a callback node is queued for the LAST
 * command and a simulated inline-IRQ work item is scheduled.
 *
 * Returns 0 on success, negative errno on the first failing entry.
 */
int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
	struct cam_cdm_hw_intf_cmd_submit_bl *req,
	struct cam_cdm_client *client)
{
	int i, rc = -EINVAL;
	struct cam_cdm_bl_request *cdm_cmd = req->data;
	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;

	mutex_lock(&client->lock);
	for (i = 0; i < req->data->cmd_arrary_count ; i++) {
		uintptr_t vaddr_ptr = 0;
		size_t len = 0;

		/*
		 * Fix: this check previously used '&&', which is always
		 * false (len cannot be both zero and > 0x100000), so
		 * zero-length and oversized commands were never rejected.
		 */
		if ((!cdm_cmd->cmd[i].len) ||
			(cdm_cmd->cmd[i].len > 0x100000)) {
			CAM_ERR(CAM_CDM,
				"len(%d) is invalid count=%d total cnt=%d",
				cdm_cmd->cmd[i].len, i,
				req->data->cmd_arrary_count);
			rc = -EINVAL;
			break;
		}
		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
			rc = cam_mem_get_cpu_buf(
				cdm_cmd->cmd[i].bl_addr.mem_handle, &vaddr_ptr,
				&len);
		} else if (req->data->type ==
			CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA) {
			rc = 0;
			vaddr_ptr = cdm_cmd->cmd[i].bl_addr.kernel_iova;
			len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
		} else {
			CAM_ERR(CAM_CDM,
				"Only mem hdl/Kernel va type is supported %d",
				req->data->type);
			rc = -EINVAL;
			break;
		}
		if ((!rc) && (vaddr_ptr) && (len) &&
			(len >= cdm_cmd->cmd[i].offset)) {
			CAM_DBG(CAM_CDM,
				"hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu",
				cdm_cmd->cmd[i].bl_addr.mem_handle,
				(void *)vaddr_ptr, cdm_cmd->cmd[i].offset,
				cdm_cmd->cmd[i].len, len);
			rc = cam_cdm_util_cmd_buf_write(
				&client->changebase_addr,
				((uint32_t *)vaddr_ptr +
					((cdm_cmd->cmd[i].offset)/4)),
				cdm_cmd->cmd[i].len, client->data.base_array,
				client->data.base_array_cnt, core->bl_tag);
			if (rc) {
				CAM_ERR(CAM_CDM,
					"write failed for cnt=%d:%d len %u",
					i, req->data->cmd_arrary_count,
					cdm_cmd->cmd[i].len);
				break;
			}
		} else {
			CAM_ERR(CAM_CDM,
				"Sanity check failed for hdl=%x len=%zu:%d",
				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
				cdm_cmd->cmd[i].offset);
			CAM_ERR(CAM_CDM,
				"Sanity check failed for cmd_count=%d cnt=%d",
				i, req->data->cmd_arrary_count);
			rc = -EINVAL;
			break;
		}
		if (!rc) {
			struct cam_cdm_work_payload *payload;

			CAM_DBG(CAM_CDM,
				"write BL success for cnt=%d with tag=%d",
				i, core->bl_tag);
			/*
			 * Fix: the callback was gated on
			 * i == cmd_arrary_count, which is never true inside
			 * this loop (i < cmd_arrary_count), so the client
			 * callback was never delivered. The intent is to
			 * fire on the LAST command of the batch.
			 */
			if ((true == req->data->flag) &&
				(i == (req->data->cmd_arrary_count - 1))) {
				struct cam_cdm_bl_cb_request_entry *node;

				node = kzalloc(sizeof(
					struct cam_cdm_bl_cb_request_entry),
					GFP_KERNEL);
				if (!node) {
					rc = -ENOMEM;
					break;
				}
				node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
				node->client_hdl = req->handle;
				node->cookie = req->data->cookie;
				node->bl_tag = core->bl_tag;
				node->userdata = req->data->userdata;
				mutex_lock(&cdm_hw->hw_mutex);
				list_add_tail(&node->entry,
					&core->bl_request_list);
				mutex_unlock(&cdm_hw->hw_mutex);

				/* Simulate the inline IRQ the real HW would
				 * raise for this tag. */
				payload = kzalloc(sizeof(
					struct cam_cdm_work_payload),
					GFP_ATOMIC);
				if (payload) {
					payload->irq_status = 0x2;
					payload->irq_data = core->bl_tag;
					payload->hw = cdm_hw;
					INIT_WORK((struct work_struct *)
						&payload->work,
						cam_virtual_cdm_work);
					queue_work(core->work_queue,
						&payload->work);
				}
			}
			core->bl_tag++;
			CAM_DBG(CAM_CDM,
				"Now commit the BL nothing for virtual");
			/* Tags wrap before reaching 63 (6-bit-ish space). */
			if (!rc && (core->bl_tag == 63))
				core->bl_tag = 0;
		}
	}
	mutex_unlock(&client->lock);
	return rc;
}
int cam_virtual_cdm_probe(struct platform_device *pdev)
{
struct cam_hw_info *cdm_hw = NULL;
struct cam_hw_intf *cdm_hw_intf = NULL;
struct cam_cdm *cdm_core = NULL;
struct cam_cdm_private_dt_data *soc_private = NULL;
int rc;
struct cam_cpas_register_params cpas_parms;
cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
if (!cdm_hw_intf)
return -ENOMEM;
cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
if (!cdm_hw) {
kfree(cdm_hw_intf);
return -ENOMEM;
}
cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
if (!cdm_hw->core_info) {
kfree(cdm_hw);
kfree(cdm_hw_intf);
return -ENOMEM;
}
cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
cdm_hw->soc_info.pdev = pdev;
cdm_hw_intf->hw_type = CAM_VIRTUAL_CDM;
cdm_hw->soc_info.soc_private = kzalloc(
sizeof(struct cam_cdm_private_dt_data), GFP_KERNEL);
if (!cdm_hw->soc_info.soc_private) {
rc = -ENOMEM;
goto soc_load_failed;
}
rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private);
if (rc) {
CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
kfree(cdm_hw->soc_info.soc_private);
cdm_hw->soc_info.soc_private = NULL;
goto soc_load_failed;
}
cdm_core = (struct cam_cdm *)cdm_hw->core_info;
soc_private = (struct cam_cdm_private_dt_data *)
cdm_hw->soc_info.soc_private;
if (soc_private->dt_cdm_shared == true)
cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
else
cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
cdm_core->bl_tag = 0;
INIT_LIST_HEAD(&cdm_core->bl_request_list);
init_completion(&cdm_core->reset_complete);
cdm_hw_intf->hw_priv = cdm_hw;
cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
cdm_hw_intf->hw_ops.init = NULL;
cdm_hw_intf->hw_ops.deinit = NULL;
cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
cdm_hw_intf->hw_ops.read = NULL;
cdm_hw_intf->hw_ops.write = NULL;
cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
cdm_hw_intf->hw_idx);
platform_set_drvdata(pdev, cdm_hw_intf);
cdm_hw->open_count = 0;
cdm_core->iommu_hdl.non_secure = -1;
cdm_core->iommu_hdl.secure = -1;
mutex_init(&cdm_hw->hw_mutex);
spin_lock_init(&cdm_hw->hw_lock);
init_completion(&cdm_hw->hw_complete);
mutex_lock(&cdm_hw->hw_mutex);
cdm_core->id = CAM_CDM_VIRTUAL;
memcpy(cdm_core->name, CAM_CDM_VIRTUAL_NAME,
sizeof(CAM_CDM_VIRTUAL_NAME));
cdm_core->work_queue = alloc_workqueue(cdm_core->name,
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
CAM_CDM_INFLIGHT_WORKS);
cdm_core->ops = NULL;
cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
cpas_parms.cell_index = cdm_hw->soc_info.index;
cpas_parms.dev = &pdev->dev;
cpas_parms.userdata = cdm_hw_intf;
strlcpy(cpas_parms.identifier, "cam-cdm-intf",
CAM_HW_IDENTIFIER_LENGTH);
rc = cam_cpas_register_client(&cpas_parms);
if (rc) {
CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
goto cpas_registration_failed;
}
CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
cpas_parms.client_handle);
cdm_core->cpas_handle = cpas_parms.client_handle;
CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
soc_private, CAM_VIRTUAL_CDM, &cdm_core->index);
if (rc) {
CAM_ERR(CAM_CDM, "Virtual CDM Interface registration failed");
goto intf_registration_failed;
}
CAM_DBG(CAM_CDM, "CDM%d registered to intf successful",
cdm_hw_intf->hw_idx);
mutex_unlock(&cdm_hw->hw_mutex);
return 0;
intf_registration_failed:
cam_cpas_unregister_client(cdm_core->cpas_handle);
cpas_registration_failed:
kfree(cdm_hw->soc_info.soc_private);
flush_workqueue(cdm_core->work_queue);
destroy_workqueue(cdm_core->work_queue);
mutex_unlock(&cdm_hw->hw_mutex);
mutex_destroy(&cdm_hw->hw_mutex);
soc_load_failed:
kfree(cdm_hw->core_info);
kfree(cdm_hw);
kfree(cdm_hw_intf);
return rc;
}
/*
 * Remove the virtual CDM platform device: deregister from CPAS and the
 * CDM interface layer, tear down the work queue, and free everything
 * allocated during probe. Returns 0 on success, negative errno otherwise.
 */
int cam_virtual_cdm_remove(struct platform_device *pdev)
{
	struct cam_hw_intf *hw_intf;
	struct cam_hw_info *hw;
	struct cam_cdm *core;
	int rc;

	hw_intf = platform_get_drvdata(pdev);
	if (!hw_intf) {
		CAM_ERR(CAM_CDM, "Failed to get dev private data");
		return -EBUSY;
	}

	hw = hw_intf->hw_priv;
	if (!hw) {
		CAM_ERR(CAM_CDM,
			"Failed to get virtual private data for type=%d idx=%d",
			hw_intf->hw_type, hw_intf->hw_idx);
		return -EBUSY;
	}

	core = hw->core_info;
	if (!core) {
		CAM_ERR(CAM_CDM,
			"Failed to get virtual core data for type=%d idx=%d",
			hw_intf->hw_type, hw_intf->hw_idx);
		return -EBUSY;
	}

	rc = cam_cpas_unregister_client(core->cpas_handle);
	if (rc) {
		CAM_ERR(CAM_CDM, "CPAS unregister failed");
		return rc;
	}

	rc = cam_cdm_intf_deregister_hw_cdm(hw_intf,
		hw->soc_info.soc_private, CAM_VIRTUAL_CDM,
		core->index);
	if (rc) {
		CAM_ERR(CAM_CDM,
			"Virtual CDM Interface de-registration failed");
		return rc;
	}

	flush_workqueue(core->work_queue);
	destroy_workqueue(core->work_queue);
	mutex_destroy(&hw->hw_mutex);
	kfree(hw->soc_info.soc_private);
	kfree(hw->core_info);
	kfree(hw);
	kfree(hw_intf);

	return 0;
}

View file

@ -0,0 +1,135 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_HW_CDM170_REG_H_
#define _CAM_HW_CDM170_REG_H_
#define CAM_CDM_REG_OFFSET_FIRST 0x0
#define CAM_CDM_REG_OFFSET_LAST 0x200
#define CAM_CDM_REGS_COUNT 0x30
#define CAM_CDM_HWFIFO_SIZE 0x40
#define CAM_CDM_OFFSET_HW_VERSION 0x0
#define CAM_CDM_OFFSET_TITAN_VERSION 0x4
#define CAM_CDM_OFFSET_RST_CMD 0x10
#define CAM_CDM_OFFSET_CGC_CFG 0x14
#define CAM_CDM_OFFSET_CORE_CFG 0x18
#define CAM_CDM_OFFSET_CORE_EN 0x1c
#define CAM_CDM_OFFSET_FE_CFG 0x20
#define CAM_CDM_OFFSET_IRQ_MASK 0x30
#define CAM_CDM_OFFSET_IRQ_CLEAR 0x34
#define CAM_CDM_OFFSET_IRQ_CLEAR_CMD 0x38
#define CAM_CDM_OFFSET_IRQ_SET 0x3c
#define CAM_CDM_OFFSET_IRQ_SET_CMD 0x40
#define CAM_CDM_OFFSET_IRQ_STATUS 0x44
#define CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK 0x1
#define CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK 0x2
#define CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK 0x4
#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000
#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000
#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000
#define CAM_CDM_OFFSET_BL_FIFO_BASE_REG 0x50
#define CAM_CDM_OFFSET_BL_FIFO_LEN_REG 0x54
#define CAM_CDM_OFFSET_BL_FIFO_STORE_REG 0x58
#define CAM_CDM_OFFSET_BL_FIFO_CFG 0x5c
#define CAM_CDM_OFFSET_BL_FIFO_RB 0x60
#define CAM_CDM_OFFSET_BL_FIFO_BASE_RB 0x64
#define CAM_CDM_OFFSET_BL_FIFO_LEN_RB 0x68
#define CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB 0x6c
#define CAM_CDM_OFFSET_IRQ_USR_DATA 0x80
#define CAM_CDM_OFFSET_WAIT_STATUS 0x84
#define CAM_CDM_OFFSET_SCRATCH_0_REG 0x90
#define CAM_CDM_OFFSET_SCRATCH_1_REG 0x94
#define CAM_CDM_OFFSET_SCRATCH_2_REG 0x98
#define CAM_CDM_OFFSET_SCRATCH_3_REG 0x9c
#define CAM_CDM_OFFSET_SCRATCH_4_REG 0xa0
#define CAM_CDM_OFFSET_SCRATCH_5_REG 0xa4
#define CAM_CDM_OFFSET_SCRATCH_6_REG 0xa8
#define CAM_CDM_OFFSET_SCRATCH_7_REG 0xac
#define CAM_CDM_OFFSET_LAST_AHB_ADDR 0xd0
#define CAM_CDM_OFFSET_LAST_AHB_DATA 0xd4
#define CAM_CDM_OFFSET_CORE_DBUG 0xd8
#define CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR 0xe0
#define CAM_CDM_OFFSET_LAST_AHB_ERR_DATA 0xe4
#define CAM_CDM_OFFSET_CURRENT_BL_BASE 0xe8
#define CAM_CDM_OFFSET_CURRENT_BL_LEN 0xec
#define CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE 0xf0
#define CAM_CDM_OFFSET_DEBUG_STATUS 0xf4
#define CAM_CDM_OFFSET_BUS_MISR_CFG_0 0x100
#define CAM_CDM_OFFSET_BUS_MISR_CFG_1 0x104
#define CAM_CDM_OFFSET_BUS_MISR_RD_VAL 0x108
#define CAM_CDM_OFFSET_PERF_MON_CTRL 0x110
#define CAM_CDM_OFFSET_PERF_MON_0 0x114
#define CAM_CDM_OFFSET_PERF_MON_1 0x118
#define CAM_CDM_OFFSET_PERF_MON_2 0x11c
#define CAM_CDM_OFFSET_SPARE 0x200
/*
* Always make sure below register offsets are aligned with
* enum cam_cdm_regs offsets
*/
/*
 * Per-register access-attribute table for the CDM 1.7.0 register map.
 * NOTE(review): this is a DEFINITION (not a declaration) in a header —
 * including this header from more than one translation unit will cause
 * duplicate-symbol errors. TODO: confirm it is included exactly once,
 * or make it static.
 */
struct cam_cdm_reg_offset cam170_cpas_cdm_register_offsets[] = {
	{ CAM_CDM_OFFSET_HW_VERSION, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_TITAN_VERSION, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_RST_CMD, CAM_REG_ATTR_WRITE },
	{ CAM_CDM_OFFSET_CGC_CFG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_CORE_CFG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_CORE_EN, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_FE_CFG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_IRQ_MASK, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_IRQ_CLEAR, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_IRQ_CLEAR_CMD, CAM_REG_ATTR_WRITE },
	{ CAM_CDM_OFFSET_IRQ_SET, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_IRQ_SET_CMD, CAM_REG_ATTR_WRITE },
	{ CAM_CDM_OFFSET_IRQ_STATUS, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_IRQ_USR_DATA, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_BASE_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_LEN_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_STORE_REG, CAM_REG_ATTR_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_CFG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_RB, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BL_FIFO_BASE_RB, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_BL_FIFO_LEN_RB, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_WAIT_STATUS, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_SCRATCH_0_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_1_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_2_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_3_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_4_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_5_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_6_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_SCRATCH_7_REG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_LAST_AHB_ADDR, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_LAST_AHB_DATA, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_CORE_DBUG, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_LAST_AHB_ERR_DATA, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_CURRENT_BL_BASE, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_CURRENT_BL_LEN, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_DEBUG_STATUS, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_BUS_MISR_CFG_0, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BUS_MISR_CFG_1, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_BUS_MISR_RD_VAL, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_PERF_MON_CTRL, CAM_REG_ATTR_READ_WRITE },
	{ CAM_CDM_OFFSET_PERF_MON_0, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_PERF_MON_1, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_PERF_MON_2, CAM_REG_ATTR_READ },
	{ CAM_CDM_OFFSET_SPARE, CAM_REG_ATTR_READ_WRITE }
};
struct cam_cdm_reg_offset_table cam170_cpas_cdm_offset_table = {
.first_offset = 0x0,
.last_offset = 0x200,
.reg_count = 0x30,
.offsets = cam170_cpas_cdm_register_offsets,
.offset_max_size = (sizeof(cam170_cpas_cdm_register_offsets)/
sizeof(struct cam_cdm_reg_offset)),
};
#endif /* _CAM_HW_CDM170_REG_H_ */

View file

@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Build rules for the cam_core module; the include paths pull in headers
# from the sibling camera subdirectories this code compiles against.
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += cam_context.o cam_context_utils.o cam_node.o cam_subdev.o

View file

@ -0,0 +1,585 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>
#include "cam_context.h"
#include "cam_debug_util.h"
#include "cam_node.h"
/*
 * Dispatch a hardware event to the state machine's irq handler for the
 * context's current state. Missing handlers are not an error (debug log
 * only); returns whatever the handler returns, or 0.
 */
static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
	void *evt_data)
{
	struct cam_context *ctx = (struct cam_context *)context;

	if (!ctx || !ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	if (!ctx->state_machine[ctx->state].irq_ops) {
		CAM_DBG(CAM_CORE,
			"No function to handle event %d in dev %d, state %d",
			evt_id, ctx->dev_hdl, ctx->state);
		return 0;
	}

	return ctx->state_machine[ctx->state].irq_ops(ctx, evt_id, evt_data);
}
int cam_context_shutdown(struct cam_context *ctx)
{
int rc = 0;
struct cam_release_dev_cmd cmd;
mutex_lock(&ctx->ctx_mutex);
if (ctx->state > CAM_CTX_AVAILABLE && ctx->state < CAM_CTX_STATE_MAX) {
cmd.session_handle = ctx->session_hdl;
cmd.dev_handle = ctx->dev_hdl;
rc = cam_context_handle_release_dev(ctx, &cmd);
if (rc)
CAM_ERR(CAM_CORE,
"context release failed for dev_name %s",
ctx->dev_name);
else
cam_context_putref(ctx);
} else {
CAM_WARN(CAM_CORE,
"dev %s context id %u state %d invalid to release hdl",
ctx->dev_name, ctx->ctx_id, ctx->state);
rc = -EINVAL;
}
mutex_unlock(&ctx->ctx_mutex);
rc = cam_destroy_device_hdl(ctx->dev_hdl);
if (rc)
CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s",
ctx->dev_name);
else
ctx->dev_hdl = -1;
return rc;
}
/*
 * CRM callback: report device info by delegating to the current state's
 * crm_ops.get_dev_info handler under the context mutex.
 */
int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
	struct cam_req_mgr_device_info *info)
{
	int rc = -EPROTO;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	if (!info) {
		CAM_ERR(CAM_CORE, "Invalid get device info payload");
		return -EINVAL;
	}

	mutex_lock(&ctx->ctx_mutex);
	if (ctx->state_machine[ctx->state].crm_ops.get_dev_info)
		rc = ctx->state_machine[ctx->state].crm_ops.get_dev_info(
			ctx, info);
	else
		CAM_ERR(CAM_CORE, "No get device info in dev %d, state %d",
			ctx->dev_hdl, ctx->state);
	mutex_unlock(&ctx->ctx_mutex);

	return rc;
}
/*
 * CRM callback: establish a link by delegating to the current state's
 * crm_ops.link handler under the context mutex.
 */
int cam_context_handle_crm_link(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *link)
{
	int rc = -EPROTO;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	if (!link) {
		CAM_ERR(CAM_CORE, "Invalid link payload");
		return -EINVAL;
	}

	mutex_lock(&ctx->ctx_mutex);
	if (ctx->state_machine[ctx->state].crm_ops.link)
		rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link);
	else
		CAM_ERR(CAM_CORE, "No crm link in dev %d, state %d",
			ctx->dev_hdl, ctx->state);
	mutex_unlock(&ctx->ctx_mutex);

	return rc;
}
/*
 * CRM callback: tear down a link by delegating to the current state's
 * crm_ops.unlink handler under the context mutex.
 */
int cam_context_handle_crm_unlink(struct cam_context *ctx,
	struct cam_req_mgr_core_dev_link_setup *unlink)
{
	int rc = -EPROTO;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	if (!unlink) {
		CAM_ERR(CAM_CORE, "Invalid unlink payload");
		return -EINVAL;
	}

	mutex_lock(&ctx->ctx_mutex);
	if (ctx->state_machine[ctx->state].crm_ops.unlink)
		rc = ctx->state_machine[ctx->state].crm_ops.unlink(
			ctx, unlink);
	else
		CAM_ERR(CAM_CORE, "No crm unlink in dev %d, name %s, state %d",
			ctx->dev_hdl, ctx->dev_name, ctx->state);
	mutex_unlock(&ctx->ctx_mutex);

	return rc;
}
/*
 * CRM callback: apply a request via the current state's
 * crm_ops.apply_req handler.
 *
 * NOTE(review): unlike the sibling CRM handlers, this one does NOT take
 * ctx_mutex — presumably deliberate for the apply path; confirm before
 * adding locking here.
 */
int cam_context_handle_crm_apply_req(struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = -EPROTO;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	if (!apply) {
		CAM_ERR(CAM_CORE, "Invalid apply request payload");
		return -EINVAL;
	}

	if (ctx->state_machine[ctx->state].crm_ops.apply_req)
		rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx,
			apply);
	else
		CAM_ERR(CAM_CORE, "No crm apply req in dev %d, state %d",
			ctx->dev_hdl, ctx->state);

	return rc;
}
/*
 * CRM callback: flush requests by delegating to the current state's
 * crm_ops.flush_req handler under the context mutex.
 */
int cam_context_handle_crm_flush_req(struct cam_context *ctx,
	struct cam_req_mgr_flush_request *flush)
{
	int rc = -EPROTO;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	mutex_lock(&ctx->ctx_mutex);
	if (ctx->state_machine[ctx->state].crm_ops.flush_req)
		rc = ctx->state_machine[ctx->state].crm_ops.flush_req(ctx,
			flush);
	else
		CAM_ERR(CAM_CORE, "No crm flush req in dev %d, state %d",
			ctx->dev_hdl, ctx->state);
	mutex_unlock(&ctx->ctx_mutex);

	return rc;
}
/*
 * CRM callback: forward a link event to the current state's
 * crm_ops.process_evt handler under the context mutex. This handler is
 * optional, so a missing op is only a debug message and returns 0.
 */
int cam_context_handle_crm_process_evt(struct cam_context *ctx,
	struct cam_req_mgr_link_evt_data *process_evt)
{
	int rc = 0;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	mutex_lock(&ctx->ctx_mutex);
	if (ctx->state_machine[ctx->state].crm_ops.process_evt)
		rc = ctx->state_machine[ctx->state].crm_ops.process_evt(ctx,
			process_evt);
	else
		/* handling of this message is optional */
		CAM_DBG(CAM_CORE, "No crm process evt in dev %d, state %d",
			ctx->dev_hdl, ctx->state);
	mutex_unlock(&ctx->ctx_mutex);

	return rc;
}
/*
 * Forward a page-fault notification (faulting iova + buffer info) to the
 * current state's pagefault handler. Missing handlers only warn; no
 * locking is done here (matches the original).
 */
int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
	uint32_t buf_info)
{
	int rc = 0;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	if (ctx->state_machine[ctx->state].pagefault_ops)
		rc = ctx->state_machine[ctx->state].pagefault_ops(ctx, iova,
			buf_info);
	else
		CAM_WARN(CAM_CORE, "No dump ctx in dev %d, state %d",
			ctx->dev_hdl, ctx->state);

	return rc;
}
/*
 * ACQUIRE_DEV ioctl path: delegate to the current state's acquire_dev
 * op, then (re)initialize the request bookkeeping lists and park all
 * preallocated requests on the free list — done unconditionally,
 * matching the original behavior, even when the op failed or is absent.
 */
int cam_context_handle_acquire_dev(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	int rc = -EPROTO;
	int idx;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_CORE, "Invalid acquire device command payload");
		return -EINVAL;
	}

	mutex_lock(&ctx->ctx_mutex);
	if (ctx->state_machine[ctx->state].ioctl_ops.acquire_dev)
		rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_dev(
			ctx, cmd);
	else
		CAM_ERR(CAM_CORE, "No acquire device in dev %d, state %d",
			cmd->dev_handle, ctx->state);

	INIT_LIST_HEAD(&ctx->active_req_list);
	INIT_LIST_HEAD(&ctx->wait_req_list);
	INIT_LIST_HEAD(&ctx->pending_req_list);
	INIT_LIST_HEAD(&ctx->free_req_list);
	for (idx = 0; idx < ctx->req_size; idx++) {
		INIT_LIST_HEAD(&ctx->req_list[idx].list);
		list_add_tail(&ctx->req_list[idx].list, &ctx->free_req_list);
	}
	mutex_unlock(&ctx->ctx_mutex);

	return rc;
}
/*
 * ACQUIRE_HW ioctl path: delegate to the current state's acquire_hw op
 * under the context mutex.
 */
int cam_context_handle_acquire_hw(struct cam_context *ctx,
	void *args)
{
	int rc = -EPROTO;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	if (!args) {
		CAM_ERR(CAM_CORE, "Invalid acquire device hw command payload");
		return -EINVAL;
	}

	mutex_lock(&ctx->ctx_mutex);
	if (ctx->state_machine[ctx->state].ioctl_ops.acquire_hw)
		rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_hw(
			ctx, args);
	else
		CAM_ERR(CAM_CORE, "No acquire hw for dev %s, state %d",
			ctx->dev_name, ctx->state);
	mutex_unlock(&ctx->ctx_mutex);

	return rc;
}
/*
 * RELEASE_DEV ioctl path: delegate to the current state's release_dev
 * op under the context mutex.
 */
int cam_context_handle_release_dev(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = -EPROTO;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_CORE, "Invalid release device command payload");
		return -EINVAL;
	}

	mutex_lock(&ctx->ctx_mutex);
	if (ctx->state_machine[ctx->state].ioctl_ops.release_dev)
		rc = ctx->state_machine[ctx->state].ioctl_ops.release_dev(
			ctx, cmd);
	else
		CAM_ERR(CAM_CORE, "No release device in dev %d, state %d",
			ctx->dev_hdl, ctx->state);
	mutex_unlock(&ctx->ctx_mutex);

	return rc;
}
/*
 * RELEASE_HW ioctl path: delegate to the current state's release_hw op
 * under the context mutex.
 */
int cam_context_handle_release_hw(struct cam_context *ctx,
	void *args)
{
	int rc = -EPROTO;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	if (!args) {
		CAM_ERR(CAM_CORE, "Invalid release HW command payload");
		return -EINVAL;
	}

	mutex_lock(&ctx->ctx_mutex);
	if (ctx->state_machine[ctx->state].ioctl_ops.release_hw)
		rc = ctx->state_machine[ctx->state].ioctl_ops.release_hw(
			ctx, args);
	else
		CAM_ERR(CAM_CORE, "No release hw for dev %s, state %d",
			ctx->dev_name, ctx->state);
	mutex_unlock(&ctx->ctx_mutex);

	return rc;
}
/*
 * FLUSH_DEV ioctl path: delegate to the current state's flush_dev op
 * under the context mutex. A missing op only warns and returns 0.
 */
int cam_context_handle_flush_dev(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd)
{
	int rc = 0;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_CORE, "Invalid flush device command payload");
		return -EINVAL;
	}

	mutex_lock(&ctx->ctx_mutex);
	if (ctx->state_machine[ctx->state].ioctl_ops.flush_dev)
		rc = ctx->state_machine[ctx->state].ioctl_ops.flush_dev(
			ctx, cmd);
	else
		CAM_WARN(CAM_CORE, "No flush device in dev %d, state %d",
			ctx->dev_hdl, ctx->state);
	mutex_unlock(&ctx->ctx_mutex);

	return rc;
}
/*
 * CONFIG_DEV ioctl path: delegate to the current state's config_dev op
 * under the context mutex.
 */
int cam_context_handle_config_dev(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = -EPROTO;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "context is not ready");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_CORE, "Invalid config device command payload");
		return -EINVAL;
	}

	mutex_lock(&ctx->ctx_mutex);
	if (ctx->state_machine[ctx->state].ioctl_ops.config_dev)
		rc = ctx->state_machine[ctx->state].ioctl_ops.config_dev(
			ctx, cmd);
	else
		CAM_ERR(CAM_CORE, "No config device in dev %d, state %d",
			ctx->dev_hdl, ctx->state);
	mutex_unlock(&ctx->ctx_mutex);

	return rc;
}
int cam_context_handle_start_dev(struct cam_context *ctx,
struct cam_start_stop_dev_cmd *cmd)
{
int rc = 0;
if (!ctx || !ctx->state_machine) {
CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
if (!cmd) {
CAM_ERR(CAM_CORE, "Invalid start device command payload");
return -EINVAL;
}
mutex_lock(&ctx->ctx_mutex);
if (ctx->state_machine[ctx->state].ioctl_ops.start_dev)
rc = ctx->state_machine[ctx->state].ioctl_ops.start_dev(
ctx, cmd);
else
/* start device can be optional for some driver */
CAM_DBG(CAM_CORE, "No start device in dev %d, state %d",
ctx->dev_hdl, ctx->state);
mutex_unlock(&ctx->ctx_mutex);
return rc;
}
int cam_context_handle_stop_dev(struct cam_context *ctx,
struct cam_start_stop_dev_cmd *cmd)
{
int rc = 0;
if (!ctx || !ctx->state_machine) {
CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
if (!cmd) {
CAM_ERR(CAM_CORE, "Invalid stop device command payload");
return -EINVAL;
}
mutex_lock(&ctx->ctx_mutex);
if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev)
rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev(
ctx, cmd);
else
/* stop device can be optional for some driver */
CAM_WARN(CAM_CORE, "No stop device in dev %d, name %s state %d",
ctx->dev_hdl, ctx->dev_name, ctx->state);
ctx->last_flush_req = 0;
mutex_unlock(&ctx->ctx_mutex);
return rc;
}
/**
 * cam_context_init()
 *
 * @brief: Initialize a camera context object: zero it, set up its
 *         locks, lists and interface tables, and seed the free list
 *         with the caller-provided request storage.
 *
 * @ctx:           Context object to initialize
 * @dev_name:      Name of the associated device
 * @dev_id:        ID of the associated device
 * @ctx_id:        ID for this context
 * @crm_node_intf: CRM-to-context function table (optional, may be NULL)
 * @hw_mgr_intf:   Context-to-HW-manager function table
 * @req_list:      Caller-owned request storage array
 * @req_size:      Number of entries in @req_list
 *
 * Returns 0 on success, -EINVAL on invalid input.
 */
int cam_context_init(struct cam_context *ctx,
	const char *dev_name,
	uint64_t dev_id,
	uint32_t ctx_id,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_mgr_intf,
	struct cam_ctx_request *req_list,
	uint32_t req_size)
{
	uint32_t idx;

	/* crm_node_intf is the only optional argument */
	if (!ctx || !hw_mgr_intf || !req_list) {
		CAM_ERR(CAM_CORE, "Invalid input parameters");
		return -EINVAL;
	}

	memset(ctx, 0, sizeof(*ctx));

	/* Identity; no handles have been created yet */
	ctx->dev_hdl = -1;
	ctx->link_hdl = -1;
	ctx->session_hdl = -1;
	ctx->dev_name = dev_name;
	ctx->dev_id = dev_id;
	ctx->ctx_id = ctx_id;

	/* Locks and list heads */
	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->ctx_mutex);
	mutex_init(&ctx->sync_mutex);
	spin_lock_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->active_req_list);
	INIT_LIST_HEAD(&ctx->wait_req_list);
	INIT_LIST_HEAD(&ctx->pending_req_list);
	INIT_LIST_HEAD(&ctx->free_req_list);

	/* Interface tables */
	ctx->ctx_crm_intf = NULL;
	ctx->crm_ctx_intf = crm_node_intf;
	ctx->hw_mgr_intf = hw_mgr_intf;
	ctx->irq_cb_intf = cam_context_handle_hw_event;

	/* Hand every request slot to the free list */
	ctx->req_list = req_list;
	ctx->req_size = req_size;
	for (idx = 0; idx < req_size; idx++) {
		INIT_LIST_HEAD(&ctx->req_list[idx].list);
		list_add_tail(&ctx->req_list[idx].list, &ctx->free_req_list);
		ctx->req_list[idx].ctx = ctx;
	}

	ctx->state = CAM_CTX_AVAILABLE;
	ctx->state_machine = NULL;
	ctx->ctx_priv = NULL;

	return 0;
}
/**
 * cam_context_deinit()
 *
 * @brief: Tear down a camera context. Called from platform device
 *         remove; everything should already have been released, so
 *         this only scrubs the context memory, logging an error if
 *         the context is not back in the AVAILABLE state.
 *
 * @ctx:   Context object to deinitialize
 *
 * Returns 0 on success, -EINVAL if @ctx is NULL.
 */
int cam_context_deinit(struct cam_context *ctx)
{
	if (!ctx)
		return -EINVAL;

	if (ctx->state != CAM_CTX_AVAILABLE)
		CAM_ERR(CAM_CORE, "Device did not shutdown cleanly");

	memset(ctx, 0, sizeof(*ctx));

	return 0;
}
/**
 * cam_context_putref()
 *
 * @brief: Drop one reference on the context; when the refcount hits
 *         zero, cam_node_put_ctxt_to_free_list() is invoked as the
 *         kref release callback.
 *
 * @ctx:   Context whose reference is returned
 *
 * NOTE(review): the debug print below reads ctx fields *after* the
 * kref_put; this is only safe if the release callback does not free
 * or scrub the context object — confirm against cam_node.
 */
void cam_context_putref(struct cam_context *ctx)
{
	kref_put(&ctx->refcount, cam_node_put_ctxt_to_free_list);
	CAM_DBG(CAM_CORE,
		"ctx device hdl %ld, ref count %d, dev_name %s",
		ctx->dev_hdl, refcount_read(&(ctx->refcount.refcount)),
		ctx->dev_name);
}
/**
 * cam_context_getref()
 *
 * @brief: Take one reference on the context. The caller is expected
 *         to already hold a valid reference, so a zero refcount here
 *         indicates a bug and triggers a WARN.
 *
 * @ctx:   Context whose reference is taken
 */
void cam_context_getref(struct cam_context *ctx)
{
	if (kref_get_unless_zero(&ctx->refcount) == 0) {
		/* should never happen */
		WARN(1, "%s fail\n", __func__);
	}
	CAM_DBG(CAM_CORE,
		"ctx device hdl %ld, ref count %d, dev_name %s",
		ctx->dev_hdl, refcount_read(&(ctx->refcount.refcount)),
		ctx->dev_name);
}

View file

@ -0,0 +1,460 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CONTEXT_H_
#define _CAM_CONTEXT_H_
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include "cam_req_mgr_interface.h"
#include "cam_hw_mgr_intf.h"
/* Forward declarations */
struct cam_context;
/* max request number */
#define CAM_CTX_REQ_MAX 20
#define CAM_CTX_CFG_MAX 20
#define CAM_CTX_RES_MAX 20
/**
 * enum cam_context_state - context top level states
 *
 */
enum cam_context_state {
	CAM_CTX_UNINIT = 0,
	CAM_CTX_AVAILABLE = 1,
	CAM_CTX_ACQUIRED = 2,
	CAM_CTX_READY = 3,
	CAM_CTX_ACTIVATED = 4,
	CAM_CTX_STATE_MAX = 5,
};
/**
 * struct cam_ctx_request - Common request structure for the context
 *
 * @list:                  Link list entry
 * @status:                Request status
 * @request_id:            Request id
 * @req_priv:              Derived request object
 * @hw_update_entries:     Hardware update entries
 * @num_hw_update_entries: Number of hardware update entries
 * @in_map_entries:        Entries for in fences
 * @num_in_map_entries:    Number of in map entries
 * @out_map_entries:       Entries for out fences
 * @num_out_map_entries:   Number of out map entries
 * @num_in_acked:          Number of in fence acked
 * @num_out_acked:         Number of out fence acked
 * @flushed:               Request is flushed
 * @ctx:                   The context to which this request belongs
 * @pf_data:               Page fault debug data
 *
 */
struct cam_ctx_request {
	struct list_head list;
	uint32_t status;
	uint64_t request_id;
	void *req_priv;
	struct cam_hw_update_entry hw_update_entries[CAM_CTX_CFG_MAX];
	uint32_t num_hw_update_entries;
	struct cam_hw_fence_map_entry in_map_entries[CAM_CTX_CFG_MAX];
	uint32_t num_in_map_entries;
	struct cam_hw_fence_map_entry out_map_entries[CAM_CTX_CFG_MAX];
	uint32_t num_out_map_entries;
	atomic_t num_in_acked;
	uint32_t num_out_acked;
	int flushed;
	struct cam_context *ctx;
	struct cam_hw_mgr_dump_pf_data pf_data;
};
/**
 * struct cam_ctx_ioctl_ops - Function table for handling IOCTL calls
 *
 * Each entry may be NULL if the operation is not supported in the
 * corresponding context state.
 *
 * @acquire_dev: Function pointer for acquire device
 * @release_dev: Function pointer for release device
 * @config_dev:  Function pointer for config device
 * @start_dev:   Function pointer for start device
 * @stop_dev:    Function pointer for stop device
 * @flush_dev:   Function pointer for flush device
 * @acquire_hw:  Function pointer for acquire hw
 * @release_hw:  Function pointer for release hw
 *
 */
struct cam_ctx_ioctl_ops {
	int (*acquire_dev)(struct cam_context *ctx,
		struct cam_acquire_dev_cmd *cmd);
	int (*release_dev)(struct cam_context *ctx,
		struct cam_release_dev_cmd *cmd);
	int (*config_dev)(struct cam_context *ctx,
		struct cam_config_dev_cmd *cmd);
	int (*start_dev)(struct cam_context *ctx,
		struct cam_start_stop_dev_cmd *cmd);
	int (*stop_dev)(struct cam_context *ctx,
		struct cam_start_stop_dev_cmd *cmd);
	int (*flush_dev)(struct cam_context *ctx,
		struct cam_flush_dev_cmd *cmd);
	int (*acquire_hw)(struct cam_context *ctx, void *args);
	int (*release_hw)(struct cam_context *ctx, void *args);
};
/**
 * struct cam_ctx_crm_ops - Function table for handling CRM to context calls
 *
 * @get_dev_info: Get device information
 * @link:         Link the context
 * @unlink:       Unlink the context
 * @apply_req:    Apply setting for the context
 * @flush_req:    Flush request to remove request ids
 * @process_evt:  Handle event notification from CRM. (optional)
 *
 */
struct cam_ctx_crm_ops {
	int (*get_dev_info)(struct cam_context *ctx,
		struct cam_req_mgr_device_info *device_info);
	int (*link)(struct cam_context *ctx,
		struct cam_req_mgr_core_dev_link_setup *link);
	int (*unlink)(struct cam_context *ctx,
		struct cam_req_mgr_core_dev_link_setup *unlink);
	int (*apply_req)(struct cam_context *ctx,
		struct cam_req_mgr_apply_request *apply);
	int (*flush_req)(struct cam_context *ctx,
		struct cam_req_mgr_flush_request *flush);
	int (*process_evt)(struct cam_context *ctx,
		struct cam_req_mgr_link_evt_data *evt_data);
};
/**
 * struct cam_ctx_ops - Collection of the interface function tables
 *
 * One instance of this struct exists per context state; the array of
 * them forms the context's top-level state machine.
 *
 * @ioctl_ops:     Ioctl function table
 * @crm_ops:       CRM to context interface function table
 * @irq_ops:       Hardware event handle function
 * @pagefault_ops: Function to be called on page fault
 *
 */
struct cam_ctx_ops {
	struct cam_ctx_ioctl_ops ioctl_ops;
	struct cam_ctx_crm_ops crm_ops;
	cam_hw_event_cb_func irq_ops;
	cam_hw_pagefault_cb_func pagefault_ops;
};
/**
 * struct cam_context - camera context object for the subdevice node
 *
 * @dev_name:         String giving name of device associated
 * @dev_id:           ID of device associated
 * @ctx_id:           ID for this context
 * @list:             Link list entry
 * @session_hdl:      Session handle
 * @dev_hdl:          Device handle
 * @link_hdl:         Link handle
 * @ctx_mutex:        Mutex for ioctl calls
 * @lock:             Spin lock
 * @active_req_list:  Requests pending for done event
 * @pending_req_list: Requests pending for reg upd event
 * @wait_req_list:    Requests waiting for apply
 * @free_req_list:    Requests that are free
 * @req_list:         Reference to the request storage
 * @req_size:         Size of the request storage
 * @hw_mgr_intf:      Context to HW interface
 * @ctx_crm_intf:     Context to CRM interface
 * @crm_ctx_intf:     CRM to context interface
 * @irq_cb_intf:      HW to context callback interface
 * @state:            Current state for top level state machine
 * @state_machine:    Top level state machine
 * @ctx_priv:         Private context pointer
 * @ctxt_to_hw_map:   Context to hardware mapping pointer
 * @refcount:         Context object refcount
 * @node:             The main node to which this context belongs
 *                    (opaque pointer; presumably a struct cam_node —
 *                    confirm against cam_node.h)
 * @sync_mutex:       mutex to sync with sync cb thread
 * @last_flush_req:   Last request to flush
 *
 */
struct cam_context {
	const char *dev_name;
	uint64_t dev_id;
	uint32_t ctx_id;
	struct list_head list;
	int32_t session_hdl;
	int32_t dev_hdl;
	int32_t link_hdl;
	struct mutex ctx_mutex;
	spinlock_t lock;
	struct list_head active_req_list;
	struct list_head pending_req_list;
	struct list_head wait_req_list;
	struct list_head free_req_list;
	struct cam_ctx_request *req_list;
	uint32_t req_size;
	struct cam_hw_mgr_intf *hw_mgr_intf;
	struct cam_req_mgr_crm_cb *ctx_crm_intf;
	struct cam_req_mgr_kmd_ops *crm_ctx_intf;
	cam_hw_event_cb_func irq_cb_intf;
	enum cam_context_state state;
	struct cam_ctx_ops *state_machine;
	void *ctx_priv;
	void *ctxt_to_hw_map;
	struct kref refcount;
	void *node;
	struct mutex sync_mutex;
	uint32_t last_flush_req;
};
/**
* cam_context_shutdown()
*
* @brief: Called when the device is closed or shut down
*
* @ctx: Object pointer for cam_context
*
*/
int cam_context_shutdown(struct cam_context *ctx);
/**
* cam_context_handle_crm_get_dev_info()
*
* @brief: Handle get device information command
*
* @ctx: Object pointer for cam_context
* @info: Device information returned
*
*/
int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
struct cam_req_mgr_device_info *info);
/**
* cam_context_handle_crm_link()
*
* @brief: Handle link command
*
* @ctx: Object pointer for cam_context
* @link: Link command payload
*
*/
int cam_context_handle_crm_link(struct cam_context *ctx,
struct cam_req_mgr_core_dev_link_setup *link);
/**
* cam_context_handle_crm_unlink()
*
* @brief: Handle unlink command
*
* @ctx: Object pointer for cam_context
* @unlink: Unlink command payload
*
*/
int cam_context_handle_crm_unlink(struct cam_context *ctx,
struct cam_req_mgr_core_dev_link_setup *unlink);
/**
* cam_context_handle_crm_apply_req()
*
* @brief: Handle apply request command
*
* @ctx: Object pointer for cam_context
* @apply: Apply request command payload
*
*/
int cam_context_handle_crm_apply_req(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply);
/**
* cam_context_handle_crm_flush_req()
*
* @brief: Handle flush request command
*
* @ctx: Object pointer for cam_context
* @apply: Flush request command payload
*
*/
int cam_context_handle_crm_flush_req(struct cam_context *ctx,
struct cam_req_mgr_flush_request *apply);
/**
* cam_context_handle_crm_process_evt()
*
* @brief: Handle process event command
*
* @ctx: Object pointer for cam_context
* @process_evt: process event command payload
*
*/
int cam_context_handle_crm_process_evt(struct cam_context *ctx,
struct cam_req_mgr_link_evt_data *process_evt);
/**
* cam_context_dump_pf_info()
*
* @brief: Handle dump active request request command
*
* @ctx: Object pointer for cam_context
* @iova: Page fault address
* @buf_info: Information about closest memory handle
*
*/
int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
uint32_t buf_info);
/**
* cam_context_handle_acquire_dev()
*
* @brief: Handle acquire device command
*
* @ctx: Object pointer for cam_context
* @cmd: Acquire device command payload
*
*/
int cam_context_handle_acquire_dev(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd);
/**
* cam_context_handle_acquire_hw()
*
* @brief: Handle acquire HW command
*
* @ctx: Object pointer for cam_context
* @cmd: Acquire HW command payload
*
*/
int cam_context_handle_acquire_hw(struct cam_context *ctx,
void *cmd);
/**
* cam_context_handle_release_dev()
*
* @brief: Handle release device command
*
* @ctx: Object pointer for cam_context
* @cmd: Release device command payload
*
*/
int cam_context_handle_release_dev(struct cam_context *ctx,
struct cam_release_dev_cmd *cmd);
/**
* cam_context_handle_release_hw()
*
* @brief: Handle release HW command
*
* @ctx: Object pointer for cam_context
* @cmd: Release HW command payload
*
*/
int cam_context_handle_release_hw(struct cam_context *ctx,
void *cmd);
/**
* cam_context_handle_config_dev()
*
* @brief: Handle config device command
*
* @ctx: Object pointer for cam_context
* @cmd: Config device command payload
*
*/
int cam_context_handle_config_dev(struct cam_context *ctx,
struct cam_config_dev_cmd *cmd);
/**
* cam_context_handle_flush_dev()
*
* @brief: Handle flush device command
*
* @ctx: Object pointer for cam_context
* @cmd: Flush device command payload
*
*/
int cam_context_handle_flush_dev(struct cam_context *ctx,
struct cam_flush_dev_cmd *cmd);
/**
* cam_context_handle_start_dev()
*
* @brief: Handle start device command
*
* @ctx: Object pointer for cam_context
* @cmd: Start device command payload
*
*/
int cam_context_handle_start_dev(struct cam_context *ctx,
struct cam_start_stop_dev_cmd *cmd);
/**
* cam_context_handle_stop_dev()
*
* @brief: Handle stop device command
*
* @ctx: Object pointer for cam_context
* @cmd: Stop device command payload
*
*/
int cam_context_handle_stop_dev(struct cam_context *ctx,
struct cam_start_stop_dev_cmd *cmd);
/**
* cam_context_deinit()
*
* @brief: Camera context deinitialize function
*
* @ctx: Object pointer for cam_context
*
*/
int cam_context_deinit(struct cam_context *ctx);
/**
* cam_context_init()
*
* @brief: Camera context initialize function
*
* @ctx: Object pointer for cam_context
* @dev_name: String giving name of device associated
* @dev_id: ID of the device associated
* @ctx_id: ID for this context
* @crm_node_intf: Function table for crm to context interface
* @hw_mgr_intf: Function table for context to hw interface
* @req_list: Requests storage
* @req_size: Size of the request storage
*
*/
int cam_context_init(struct cam_context *ctx,
const char *dev_name,
uint64_t dev_id,
uint32_t ctx_id,
struct cam_req_mgr_kmd_ops *crm_node_intf,
struct cam_hw_mgr_intf *hw_mgr_intf,
struct cam_ctx_request *req_list,
uint32_t req_size);
/**
* cam_context_putref()
*
* @brief: Put back context reference.
*
* @ctx: Context for which ref is returned
*
*/
void cam_context_putref(struct cam_context *ctx);
/**
* cam_context_getref()
*
* @brief: Take a context reference.
*
* @ctx: Context for which ref is taken
*
*/
void cam_context_getref(struct cam_context *ctx);
#endif /* _CAM_CONTEXT_H_ */

View file

@ -0,0 +1,948 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <media/cam_sync.h>
#include <media/cam_defs.h>
#include "cam_context.h"
#include "cam_context_utils.h"
#include "cam_mem_mgr.h"
#include "cam_node.h"
#include "cam_req_mgr_util.h"
#include "cam_sync_api.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
/*
 * Bitmask enabling verbose request-list movement logs per device; it
 * is tested as (cam_debug_ctx_req_list & ctx->dev_id), which assumes
 * dev_id values are single-bit flags — TODO confirm. Runtime-tunable
 * through the module parameter.
 */
static uint cam_debug_ctx_req_list;
module_param(cam_debug_ctx_req_list, uint, 0644);
/*
 * Reject callers running in interrupt context; the helpers below take
 * mutexes and must run in process context. Returns 0 when valid,
 * -EINVAL (with a WARN) otherwise.
 */
static inline int cam_context_validate_thread(void)
{
	if (!in_interrupt())
		return 0;

	WARN(1, "Invalid execution context\n");
	return -EINVAL;
}
/**
 * cam_context_buf_done_from_hw()
 *
 * @brief: Handle a buffer-done event from the HW layer: pop the oldest
 *         request off the active list, signal all of its output fences,
 *         and return the request to the free list.
 *
 * @ctx:             Context that owns the request
 * @done_event_data: Pointer to a struct cam_hw_done_event_data
 * @bubble_state:    Non-zero means the request finished with an error;
 *                   fences are then signalled with
 *                   CAM_SYNC_STATE_SIGNALED_ERROR
 *
 * Returns 0 on success, -EINVAL on bad input or interrupt context,
 * -EIO when the done event does not match the head of the active list
 * or the request has no output fences.
 */
int cam_context_buf_done_from_hw(struct cam_context *ctx,
	void *done_event_data, uint32_t bubble_state)
{
	int j;
	int result;
	struct cam_ctx_request *req;
	struct cam_hw_done_event_data *done =
		(struct cam_hw_done_event_data *)done_event_data;
	int rc;

	if (!ctx || !done) {
		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, done);
		return -EINVAL;
	}

	/* This path sleeps (fence signalling) — refuse interrupt context */
	rc = cam_context_validate_thread();
	if (rc)
		return rc;

	spin_lock(&ctx->lock);
	if (list_empty(&ctx->active_req_list)) {
		CAM_ERR(CAM_CTXT, "[%s][%d] no active request",
			ctx->dev_name, ctx->ctx_id);
		spin_unlock(&ctx->lock);
		return -EIO;
	}
	/* Completion is expected strictly in order: check the list head */
	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	trace_cam_buf_done("UTILS", ctx, req);

	if (done->request_id != req->request_id) {
		CAM_ERR(CAM_CTXT,
			"[%s][%d] mismatch: done req[%lld], active req[%lld]",
			ctx->dev_name, ctx->ctx_id,
			done->request_id, req->request_id);
		spin_unlock(&ctx->lock);
		return -EIO;
	}

	if (!req->num_out_map_entries) {
		CAM_ERR(CAM_CTXT, "[%s][%d] no output fence to signal",
			ctx->dev_name, ctx->ctx_id);
		spin_unlock(&ctx->lock);
		return -EIO;
	}

	/*
	 * since another thread may be adding/removing from active
	 * list, so hold the lock
	 */
	list_del_init(&req->list);
	spin_unlock(&ctx->lock);
	if (!bubble_state) {
		result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
	} else {
		CAM_DBG(CAM_REQ,
			"[%s][ctx_id %d] : req[%llu] is done with error",
			ctx->dev_name, ctx->ctx_id, req->request_id);
		for (j = 0; j < req->num_out_map_entries; j++)
			CAM_DBG(CAM_REQ, "fence %d signaled with error",
				req->out_map_entries[j].sync_id);
		result = CAM_SYNC_STATE_SIGNALED_ERROR;
	}

	/* Signal every output fence, then invalidate the stored sync ids */
	for (j = 0; j < req->num_out_map_entries; j++) {
		cam_sync_signal(req->out_map_entries[j].sync_id, result);
		req->out_map_entries[j].sync_id = -1;
	}

	if (cam_debug_ctx_req_list & ctx->dev_id)
		CAM_INFO(CAM_CTXT,
			"[%s][%d] : Moving req[%llu] from active_list to free_list",
			ctx->dev_name, ctx->ctx_id, req->request_id);

	/*
	 * another thread may be adding/removing from free list,
	 * so hold the lock
	 */
	spin_lock(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	req->ctx = NULL;
	spin_unlock(&ctx->lock);

	return 0;
}
/**
 * cam_context_apply_req_to_hw()
 *
 * @brief: Move a request onto the active list and submit its prepared
 *         hardware update entries to the HW manager via hw_config().
 *         On failure the request is moved back to the free list.
 *
 * @req:   Request to submit (must have been prepared already)
 * @apply: CRM apply payload for this request
 *
 * Returns 0 on success, -EFAULT if the HW interface is missing,
 * otherwise the hw_config() return code.
 */
static int cam_context_apply_req_to_hw(struct cam_ctx_request *req,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_context *ctx = req->ctx;
	struct cam_hw_config_args cfg;

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
			ctx->dev_name, ctx->ctx_id);
		rc = -EFAULT;
		goto end;
	}

	/* Activate first so a buf-done event can find the request */
	spin_lock(&ctx->lock);
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	spin_unlock(&ctx->lock);

	if (cam_debug_ctx_req_list & ctx->dev_id)
		CAM_INFO(CAM_CTXT,
			"[%s][%d] : Moving req[%llu] from pending_list to active_list",
			ctx->dev_name, ctx->ctx_id, req->request_id);

	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req->hw_update_entries;
	cfg.num_hw_update_entries = req->num_hw_update_entries;
	cfg.out_map_entries = req->out_map_entries;
	cfg.num_out_map_entries = req->num_out_map_entries;
	cfg.priv = req->req_priv;

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		/* Submission failed: recycle the request immediately */
		spin_lock(&ctx->lock);
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
		spin_unlock(&ctx->lock);

		if (cam_debug_ctx_req_list & ctx->dev_id)
			CAM_INFO(CAM_CTXT,
				"[%s][%d] : Moving req[%llu] from active_list to free_list",
				ctx->dev_name, ctx->ctx_id, req->request_id);
	}

end:
	return rc;
}
/**
 * cam_context_sync_callback()
 *
 * @brief: Input-fence completion callback. Once the last input fence
 *         of a request has fired, the request is either submitted to
 *         hardware or — if it was flushed in the meantime — recycled
 *         onto the free list. Always drops the context reference that
 *         was taken when the callback was registered.
 *
 * @sync_obj: Sync object id that fired
 * @status:   Fence status (error triggers a flush of this request)
 * @data:     Opaque pointer to the struct cam_ctx_request
 */
static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
{
	struct cam_ctx_request *req = data;
	struct cam_context *ctx = NULL;
	struct cam_flush_dev_cmd flush_cmd;
	struct cam_req_mgr_apply_request apply;
	int rc;

	if (!req) {
		CAM_ERR(CAM_CTXT, "Invalid input param");
		return;
	}
	rc = cam_context_validate_thread();
	if (rc)
		return;

	ctx = req->ctx;
	if (!ctx) {
		CAM_ERR(CAM_CTXT, "Invalid ctx for req %llu", req->request_id);
		return;
	}

	/* Act only when the last outstanding input fence has acked */
	if (atomic_inc_return(&req->num_in_acked) == req->num_in_map_entries) {
		apply.request_id = req->request_id;
		/*
		 * take mutex to ensure that another thread does
		 * not flush the request while this
		 * thread is submitting it to h/w. The submit to
		 * h/w and adding to the active list should happen
		 * in a critical section which is provided by this
		 * mutex.
		 */
		if (status == CAM_SYNC_STATE_SIGNALED_ERROR) {
			CAM_DBG(CAM_CTXT, "fence error: %d", sync_obj);
			flush_cmd.req_id = req->request_id;
			cam_context_flush_req_to_hw(ctx, &flush_cmd);
		}

		mutex_lock(&ctx->sync_mutex);
		if (!req->flushed) {
			cam_context_apply_req_to_hw(req, &apply);
			mutex_unlock(&ctx->sync_mutex);
		} else {
			/* Flushed while waiting on fences: just recycle it */
			req->flushed = 0;
			req->ctx = NULL;
			mutex_unlock(&ctx->sync_mutex);
			spin_lock(&ctx->lock);
			list_del_init(&req->list);
			list_add_tail(&req->list, &ctx->free_req_list);
			spin_unlock(&ctx->lock);

			if (cam_debug_ctx_req_list & ctx->dev_id)
				CAM_INFO(CAM_CTXT,
					"[%s][%d] : Moving req[%llu] from pending_list to free_list",
					ctx->dev_name, ctx->ctx_id,
					req->request_id);
		}
	}
	/* Pairs with the getref taken at callback registration */
	cam_context_putref(ctx);
}
/**
 * cam_context_release_dev_to_hw()
 *
 * @brief: Release this context's hardware resources through the HW
 *         manager and invalidate the session/device/link handles.
 *
 * @ctx:   Context to release
 * @cmd:   Release device command payload (unused here)
 *
 * Returns 0 on success, -EINVAL when @ctx or the HW release interface
 * is missing. The hw_release() return value is not propagated.
 */
int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	struct cam_hw_release_args arg;

	if (!ctx) {
		CAM_ERR(CAM_CTXT, "Invalid input param");
		return -EINVAL;
	}

	if (!ctx->hw_mgr_intf || !ctx->hw_mgr_intf->hw_release) {
		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
			ctx->dev_name, ctx->ctx_id);
		return -EINVAL;
	}

	arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	arg.active_req = false;
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg);

	/* Resources are gone; drop the mapping and all handles */
	ctx->ctxt_to_hw_map = NULL;
	ctx->session_hdl = -1;
	ctx->dev_hdl = -1;
	ctx->link_hdl = -1;

	return 0;
}
/**
 * cam_context_prepare_dev_to_hw()
 *
 * @brief: Build a request from a user config-dev packet: map the
 *         packet buffer, have the HW manager fill the update/fence
 *         tables, take references on the output fences, and either
 *         queue the request behind its input fences or (when it has
 *         none) leave submission to the caller.
 *
 * @ctx:   Context the request belongs to
 * @cmd:   Config-dev command holding the packet memory handle/offset
 *
 * Returns 0 on success; -EINVAL / -EFAULT / -ENOMEM on the various
 * failure paths (bad input, HW prepare failure, no free request).
 *
 * NOTE(review): the CPU mapping obtained via cam_mem_get_cpu_buf() is
 * not explicitly released on any path — confirm whether the mem mgr
 * requires a matching put.
 */
int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_hw_prepare_update_args cfg;
	uintptr_t packet_addr;
	struct cam_packet *packet;
	size_t len = 0;
	int32_t i = 0, j = 0;

	if (!ctx || !cmd) {
		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
		rc = -EINVAL;
		goto end;
	}

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
			ctx->dev_name, ctx->ctx_id);
		rc = -EFAULT;
		goto end;
	}

	rc = cam_context_validate_thread();
	if (rc)
		return rc;

	/* Grab a free request slot, if any */
	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->free_req_list)) {
		req = list_first_entry(&ctx->free_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
	}
	spin_unlock(&ctx->lock);

	if (!req) {
		CAM_ERR(CAM_CTXT, "[%s][%d] No more request obj free",
			ctx->dev_name, ctx->ctx_id);
		rc = -ENOMEM;
		goto end;
	}

	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	req->ctx = ctx;

	/* for config dev, only memory handle is supported */
	/* map packet from the memhandle */
	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
		&packet_addr, &len);
	if (rc != 0) {
		CAM_ERR(CAM_CTXT, "[%s][%d] Can not get packet address",
			ctx->dev_name, ctx->ctx_id);
		rc = -EINVAL;
		goto free_req;
	}

	packet = (struct cam_packet *) ((uint8_t *)packet_addr +
		(uint32_t)cmd->offset);

	/* Reject requests older than (or equal to) the last flush marker */
	if (packet->header.request_id <= ctx->last_flush_req) {
		CAM_DBG(CAM_CORE,
			"request %lld has been flushed, reject packet",
			packet->header.request_id);
		rc = -EINVAL;
		goto free_req;
	}

	/* A newer accepted request clears the flush marker */
	if (packet->header.request_id > ctx->last_flush_req)
		ctx->last_flush_req = 0;

	/* preprocess the configuration */
	memset(&cfg, 0, sizeof(cfg));
	cfg.packet = packet;
	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	cfg.max_hw_update_entries = CAM_CTX_CFG_MAX;
	cfg.num_hw_update_entries = req->num_hw_update_entries;
	cfg.hw_update_entries = req->hw_update_entries;
	cfg.max_out_map_entries = CAM_CTX_CFG_MAX;
	cfg.out_map_entries = req->out_map_entries;
	cfg.max_in_map_entries = CAM_CTX_CFG_MAX;
	cfg.in_map_entries = req->in_map_entries;
	cfg.pf_data = &(req->pf_data);

	rc = ctx->hw_mgr_intf->hw_prepare_update(
		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc != 0) {
		CAM_ERR(CAM_CTXT,
			"[%s][%d] Prepare config packet failed in HW layer",
			ctx->dev_name, ctx->ctx_id);
		rc = -EFAULT;
		goto free_req;
	}
	req->num_hw_update_entries = cfg.num_hw_update_entries;
	req->num_out_map_entries = cfg.num_out_map_entries;
	req->num_in_map_entries = cfg.num_in_map_entries;
	atomic_set(&req->num_in_acked, 0);
	req->request_id = packet->header.request_id;
	req->status = 1;
	req->req_priv = cfg.priv;

	/* Hold a ref on every output fence until it is signalled */
	for (i = 0; i < req->num_out_map_entries; i++) {
		rc = cam_sync_get_obj_ref(req->out_map_entries[i].sync_id);
		if (rc) {
			CAM_ERR(CAM_CTXT, "Can't get ref for sync %d",
				req->out_map_entries[i].sync_id);
			goto put_ref;
		}
	}

	if (req->num_in_map_entries > 0) {
		/* Park the request until all input fences fire */
		spin_lock(&ctx->lock);
		list_add_tail(&req->list, &ctx->pending_req_list);
		spin_unlock(&ctx->lock);

		if (cam_debug_ctx_req_list & ctx->dev_id)
			CAM_INFO(CAM_CTXT,
				"[%s][%d] : Moving req[%llu] from free_list to pending_list",
				ctx->dev_name, ctx->ctx_id, req->request_id);

		for (j = 0; j < req->num_in_map_entries; j++) {
			/* One ctx ref per registered callback; put in the cb */
			cam_context_getref(ctx);
			rc = cam_sync_register_callback(
					cam_context_sync_callback,
					(void *)req,
					req->in_map_entries[j].sync_id);
			if (rc) {
				CAM_ERR(CAM_CTXT,
					"[%s][%d] Failed register fence cb: %d ret = %d",
					ctx->dev_name, ctx->ctx_id,
					req->in_map_entries[j].sync_id, rc);
				spin_lock(&ctx->lock);
				list_del_init(&req->list);
				spin_unlock(&ctx->lock);

				if (cam_debug_ctx_req_list & ctx->dev_id)
					CAM_INFO(CAM_CTXT,
						"[%s][%d] : Moving req[%llu] from pending_list to free_list",
						ctx->dev_name, ctx->ctx_id,
						req->request_id);

				cam_context_putref(ctx);
				goto put_ref;
			}
			CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d",
				req->in_map_entries[j].sync_id, rc);
		}
		goto end;
	}

	return rc;

put_ref:
	/*
	 * Drop the out-fence refs taken so far; when we got here from
	 * the in-fence loop, i == num_out_map_entries so all are put.
	 */
	for (--i; i >= 0; i--) {
		rc = cam_sync_put_obj_ref(req->out_map_entries[i].sync_id);
		if (rc)
			CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
				req->out_map_entries[i].sync_id);
	}
free_req:
	spin_lock(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	req->ctx = NULL;
	spin_unlock(&ctx->lock);
end:
	return rc;
}
/**
 * cam_context_acquire_dev_to_hw()
 *
 * @brief: Acquire hardware resources for this context through the HW
 *         manager, then create a device handle for the session. On
 *         handle-creation failure the acquired HW is released again.
 *
 * @ctx:   Context to bind the acquired resources to
 * @cmd:   Acquire device command from user space (resource handles
 *         must be passed as a user pointer, handle_type == 1)
 *
 * Returns 0 on success, -EINVAL / -ENOMEM / -EFAULT on failure.
 */
int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	int rc;
	struct cam_hw_acquire_args param;
	struct cam_create_dev_hdl req_hdl_param;
	struct cam_hw_release_args release;

	if (!ctx || !cmd) {
		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
		rc = -EINVAL;
		goto end;
	}

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
			ctx->dev_name, ctx->ctx_id);
		rc = -EFAULT;
		goto end;
	}

	CAM_DBG(CAM_CTXT, "ses hdl: %x, num_res: %d, type: %d, res: %lld",
		cmd->session_handle, cmd->num_resources, cmd->handle_type,
		cmd->resource_hdl);

	if (cmd->num_resources > CAM_CTX_RES_MAX) {
		CAM_ERR(CAM_CTXT, "[%s][%d] resource limit exceeded",
			ctx->dev_name, ctx->ctx_id);
		rc = -ENOMEM;
		goto end;
	}

	/* for now we only support user pointer */
	if (cmd->handle_type != 1)  {
		CAM_ERR(CAM_CTXT, "[%s][%d] Only user pointer is supported",
			ctx->dev_name, ctx->ctx_id);
		rc = -EINVAL;
		goto end;
	}

	/* fill in parameters */
	param.context_data = ctx;
	param.event_cb = ctx->irq_cb_intf;
	param.num_acq = cmd->num_resources;
	param.acquire_info = cmd->resource_hdl;

	/* call HW manager to reserve the resource */
	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
		&param);
	if (rc != 0) {
		CAM_ERR(CAM_CTXT, "[%s][%d] Acquire device failed",
			ctx->dev_name, ctx->ctx_id);
		goto end;
	}

	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;

	/* if hw resource acquire successful, acquire dev handle */
	req_hdl_param.session_hdl = cmd->session_handle;
	/* bridge is not ready for these flags. so false for now */
	req_hdl_param.v4l2_sub_dev_flag = 0;
	req_hdl_param.media_entity_flag = 0;
	req_hdl_param.priv = ctx;
	req_hdl_param.ops = ctx->crm_ctx_intf;

	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
	if (ctx->dev_hdl <= 0) {
		rc = -EFAULT;
		CAM_ERR(CAM_CTXT, "[%s][%d] Can not create device handle",
			ctx->dev_name, ctx->ctx_id);
		goto free_hw;
	}
	cmd->dev_handle = ctx->dev_hdl;

	/* store session information */
	ctx->session_hdl = cmd->session_handle;

	return rc;

free_hw:
	/* Undo the HW acquisition done above */
	release.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
	ctx->ctxt_to_hw_map = NULL;
	ctx->dev_hdl = -1;
end:
	return rc;
}
/**
 * cam_context_flush_ctx_to_hw()
 *
 * @brief: Flush every request owned by this context. Pending requests
 *         have their input-fence callbacks deregistered and output
 *         fences errored out; the HW manager is asked to flush all
 *         pending/active work; finally active requests are errored
 *         and recycled onto the free list.
 *
 * @ctx:   Context to flush
 *
 * Returns 0 (failures of individual fence operations are logged but
 * not propagated).
 *
 * NOTE(review): both fence loops break out on -EALREADY, leaving any
 * remaining output fences of that request unsignalled — confirm this
 * is intended.
 */
int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
{
	struct cam_hw_flush_args flush_args;
	struct list_head temp_list;
	struct cam_ctx_request *req;
	uint32_t i;
	int rc = 0;
	bool free_req;

	CAM_DBG(CAM_CTXT, "[%s] E: NRT flush ctx", ctx->dev_name);
	memset(&flush_args, 0, sizeof(flush_args));

	/*
	 * flush pending requests, take the sync lock to synchronize with the
	 * sync callback thread so that the sync cb thread does not try to
	 * submit request to h/w while the request is being flushed
	 */
	mutex_lock(&ctx->sync_mutex);
	INIT_LIST_HEAD(&temp_list);
	spin_lock(&ctx->lock);
	list_splice_init(&ctx->pending_req_list, &temp_list);
	spin_unlock(&ctx->lock);

	if (cam_debug_ctx_req_list & ctx->dev_id)
		CAM_INFO(CAM_CTXT,
			"[%s][%d] : Moving all pending requests from pending_list to temp_list",
			ctx->dev_name, ctx->ctx_id);

	flush_args.num_req_pending = 0;
	while (true) {
		/*
		 * temp_list is local, but the spinlock also serializes
		 * req->list manipulation against other threads.
		 */
		spin_lock(&ctx->lock);
		if (list_empty(&temp_list)) {
			spin_unlock(&ctx->lock);
			break;
		}

		req = list_first_entry(&temp_list,
				struct cam_ctx_request, list);

		list_del_init(&req->list);
		spin_unlock(&ctx->lock);
		req->flushed = 1;

		flush_args.flush_req_pending[flush_args.num_req_pending++] =
			req->req_priv;

		free_req = false;
		/* Deregister input-fence callbacks; each success drops a ref */
		for (i = 0; i < req->num_in_map_entries; i++) {
			rc = cam_sync_deregister_callback(
				cam_context_sync_callback,
				(void *)req,
				req->in_map_entries[i].sync_id);
			if (!rc) {
				cam_context_putref(ctx);
				if (atomic_inc_return(&req->num_in_acked) ==
					req->num_in_map_entries)
					free_req = true;
			}
		}

		/* Error out this request's output fences */
		for (i = 0; i < req->num_out_map_entries; i++) {
			if (req->out_map_entries[i].sync_id != -1) {
				rc = cam_sync_signal(
					req->out_map_entries[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc == -EALREADY) {
					CAM_ERR(CAM_CTXT,
						"Req: %llu already signalled, sync_id:%d",
						req->request_id,
						req->out_map_entries[i].sync_id);
					break;
				}
			}
		}

		/*
		 * If we have deregistered the last sync callback, req will
		 * not be put on the free list. So put it on the free list here
		 */
		if (free_req) {
			req->ctx = NULL;
			spin_lock(&ctx->lock);
			list_add_tail(&req->list, &ctx->free_req_list);
			spin_unlock(&ctx->lock);
		}

		if (cam_debug_ctx_req_list & ctx->dev_id)
			CAM_INFO(CAM_CTXT,
				"[%s][%d] : Deleting req[%llu] from temp_list",
				ctx->dev_name, ctx->ctx_id, req->request_id);
	}
	mutex_unlock(&ctx->sync_mutex);

	/* Ask the HW manager to flush everything it still holds */
	if (ctx->hw_mgr_intf->hw_flush) {
		flush_args.num_req_active = 0;
		spin_lock(&ctx->lock);
		list_for_each_entry(req, &ctx->active_req_list, list) {
			flush_args.flush_req_active[flush_args.num_req_active++]
				= req->req_priv;
		}
		spin_unlock(&ctx->lock);

		if (flush_args.num_req_pending || flush_args.num_req_active) {
			flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
			flush_args.flush_type = CAM_FLUSH_TYPE_ALL;
			ctx->hw_mgr_intf->hw_flush(
				ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
		}
	}

	/* Now drain the active list the same way */
	INIT_LIST_HEAD(&temp_list);
	spin_lock(&ctx->lock);
	list_splice_init(&ctx->active_req_list, &temp_list);
	INIT_LIST_HEAD(&ctx->active_req_list);
	spin_unlock(&ctx->lock);

	if (cam_debug_ctx_req_list & ctx->dev_id)
		CAM_INFO(CAM_CTXT,
			"[%s][%d] : Moving all requests from active_list to temp_list",
			ctx->dev_name, ctx->ctx_id);

	while (true) {
		spin_lock(&ctx->lock);
		if (list_empty(&temp_list)) {
			spin_unlock(&ctx->lock);
			break;
		}

		req = list_first_entry(&temp_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		spin_unlock(&ctx->lock);

		for (i = 0; i < req->num_out_map_entries; i++) {
			if (req->out_map_entries[i].sync_id != -1) {
				rc = cam_sync_signal(
					req->out_map_entries[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc == -EALREADY) {
					CAM_ERR(CAM_CTXT,
						"Req: %llu already signalled ctx: %pK dev_name: %s dev_handle: %d ctx_state: %d",
						req->request_id, req->ctx,
						req->ctx->dev_name,
						req->ctx->dev_hdl,
						req->ctx->state);
					break;
				}
			}
		}

		spin_lock(&ctx->lock);
		list_add_tail(&req->list, &ctx->free_req_list);
		spin_unlock(&ctx->lock);
		req->ctx = NULL;

		if (cam_debug_ctx_req_list & ctx->dev_id)
			CAM_INFO(CAM_CTXT,
				"[%s][%d] : Moving req[%llu] from temp_list to free_list",
				ctx->dev_name, ctx->ctx_id, req->request_id);
	}

	CAM_DBG(CAM_CTXT, "[%s] X: NRT flush ctx", ctx->dev_name);

	return 0;
}
/*
 * cam_context_flush_req_to_hw() - flush one request (cmd->req_id) owned by
 * this context through the HW manager.
 *
 * The pending list is searched first (under sync_mutex + ctx->lock); if the
 * request is not found there and the HW manager supports flush, the active
 * list is searched. A matched request is reported to the HW manager, its
 * input-fence callbacks are deregistered and its output fences are signalled
 * with an error state before the request is recycled to the free list.
 *
 * @ctx: context owning the request
 * @cmd: flush command from user space carrying the request id
 *
 * Return: always 0. NOTE(review): errors from the cam_sync_* helpers are
 * logged but never propagated - presumably intentional best-effort
 * semantics; confirm.
 */
int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd)
{
	struct cam_ctx_request *req = NULL;
	struct cam_hw_flush_args flush_args;
	uint32_t i;
	int32_t sync_id = 0;
	int rc = 0;
	bool free_req = false;

	CAM_DBG(CAM_CTXT, "[%s] E: NRT flush req", ctx->dev_name);

	memset(&flush_args, 0, sizeof(flush_args));
	flush_args.num_req_pending = 0;
	flush_args.num_req_active = 0;

	/* Search the pending list; sync_mutex keeps this atomic with respect
	 * to input-fence callback registration.
	 */
	mutex_lock(&ctx->sync_mutex);
	spin_lock(&ctx->lock);
	list_for_each_entry(req, &ctx->pending_req_list, list) {
		if (req->request_id != cmd->req_id)
			continue;

		if (cam_debug_ctx_req_list & ctx->dev_id)
			CAM_INFO(CAM_CTXT,
				"[%s][%d] : Deleting req[%llu] from pending_list",
				ctx->dev_name, ctx->ctx_id, req->request_id);

		list_del_init(&req->list);
		req->flushed = 1;

		flush_args.flush_req_pending[flush_args.num_req_pending++] =
			req->req_priv;
		break;
	}
	spin_unlock(&ctx->lock);
	mutex_unlock(&ctx->sync_mutex);

	if (ctx->hw_mgr_intf->hw_flush) {
		/* Not pending - try the active list. */
		if (!flush_args.num_req_pending) {
			spin_lock(&ctx->lock);
			list_for_each_entry(req, &ctx->active_req_list, list) {
				if (req->request_id != cmd->req_id)
					continue;

				list_del_init(&req->list);

				flush_args.flush_req_active[
					flush_args.num_req_active++] =
					req->req_priv;
				break;
			}
			spin_unlock(&ctx->lock);
		}

		if (flush_args.num_req_pending || flush_args.num_req_active) {
			flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
			flush_args.flush_type = CAM_FLUSH_TYPE_REQ;
			ctx->hw_mgr_intf->hw_flush(
				ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
		}
	}

	/* NOTE(review): if the request was on neither list, req is NOT NULL
	 * here - list_for_each_entry() leaves it aliasing the list head's
	 * container. The num_req_pending/num_req_active guards below are what
	 * prevent dereferencing that bogus pointer; the bare "if (req)" alone
	 * would not.
	 */
	if (req) {
		if (flush_args.num_req_pending) {
			/* Drop input-fence callbacks that would otherwise
			 * apply this request later; each successful
			 * deregistration releases the context reference taken
			 * at registration time.
			 */
			for (i = 0; i < req->num_in_map_entries; i++) {
				rc = cam_sync_deregister_callback(
					cam_context_sync_callback,
					(void *)req,
					req->in_map_entries[i].sync_id);
				if (rc)
					continue;

				cam_context_putref(ctx);
				if (atomic_inc_return(&req->num_in_acked) ==
					req->num_in_map_entries)
					free_req = true;
			}
		}

		if (flush_args.num_req_pending || flush_args.num_req_active) {
			/* Fail every output fence of the flushed request. */
			for (i = 0; i < req->num_out_map_entries; i++) {
				sync_id =
					req->out_map_entries[i].sync_id;
				if (sync_id != -1) {
					rc = cam_sync_signal(sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR);
					if (rc == -EALREADY) {
						CAM_ERR(CAM_CTXT,
							"Req: %llu already signalled, sync_id:%d",
							req->request_id, sync_id);
						break;
					}
				}
			}

			/* Active requests (no pending input fences) or pending
			 * requests whose callbacks were all deregistered can
			 * be recycled immediately.
			 */
			if (flush_args.num_req_active || free_req) {
				req->ctx = NULL;
				spin_lock(&ctx->lock);
				list_add_tail(&req->list, &ctx->free_req_list);
				spin_unlock(&ctx->lock);

				if (cam_debug_ctx_req_list & ctx->dev_id)
					CAM_INFO(CAM_CTXT,
						"[%s][%d] : Moving req[%llu] from %s to free_list",
						ctx->dev_name, ctx->ctx_id,
						req->request_id,
						flush_args.num_req_active ?
							"active_list" :
							"pending_list");
			}
		}
	}

	CAM_DBG(CAM_CTXT, "[%s] X: NRT flush req", ctx->dev_name);

	return 0;
}
/*
 * cam_context_flush_dev_to_hw() - dispatch a user-space flush command.
 *
 * CAM_FLUSH_TYPE_ALL flushes every request on the context (and records the
 * id in last_flush_req); CAM_FLUSH_TYPE_REQ flushes one request. Any other
 * type is rejected.
 *
 * @ctx: context to flush
 * @cmd: flush command from user space
 *
 * Return: 0 on success, -EINVAL on bad arguments or flush type, -EFAULT if
 * the HW interface is not ready, or the helper's status.
 */
int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd)
{
	int rc = 0;

	if (!ctx || !cmd) {
		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
		rc = -EINVAL;
		goto end;
	}

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
			ctx->dev_name, ctx->ctx_id);
		rc = -EFAULT;
		goto end;
	}

	if (cmd->flush_type == CAM_FLUSH_TYPE_ALL) {
		ctx->last_flush_req = cmd->req_id;
		rc = cam_context_flush_ctx_to_hw(ctx);
	} else if (cmd->flush_type == CAM_FLUSH_TYPE_REQ) {
		rc = cam_context_flush_req_to_hw(ctx, cmd);
	} else {
		rc = -EINVAL;
		/* Log with CAM_CTXT for consistency with the rest of this
		 * file (was CAM_CORE).
		 */
		CAM_ERR(CAM_CTXT, "[%s][%d] Invalid flush type %d",
			ctx->dev_name, ctx->ctx_id, cmd->flush_type);
	}

end:
	return rc;
}
int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
struct cam_start_stop_dev_cmd *cmd)
{
int rc = 0;
struct cam_hw_start_args arg;
if (!ctx || !cmd) {
CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
rc = -EINVAL;
goto end;
}
if (!ctx->hw_mgr_intf) {
CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
ctx->dev_name, ctx->ctx_id);
rc = -EFAULT;
goto end;
}
if ((cmd->session_handle != ctx->session_hdl) ||
(cmd->dev_handle != ctx->dev_hdl)) {
CAM_ERR(CAM_CTXT,
"[%s][%d] Invalid session hdl[%d], dev_handle[%d]",
ctx->dev_name, ctx->ctx_id,
cmd->session_handle, cmd->dev_handle);
rc = -EPERM;
goto end;
}
if (ctx->hw_mgr_intf->hw_start) {
arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
&arg);
if (rc) {
/* HW failure. user need to clean up the resource */
CAM_ERR(CAM_CTXT, "[%s][%d] Start HW failed",
ctx->dev_name, ctx->ctx_id);
goto end;
}
}
end:
return rc;
}
int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
{
int rc = 0;
struct cam_hw_stop_args stop;
if (!ctx) {
CAM_ERR(CAM_CTXT, "Invalid input param");
rc = -EINVAL;
goto end;
}
if (!ctx->hw_mgr_intf) {
CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
ctx->dev_name, ctx->ctx_id);
rc = -EFAULT;
goto end;
}
rc = cam_context_validate_thread();
if (rc)
goto end;
rc = cam_context_flush_ctx_to_hw(ctx);
if (rc)
goto end;
/* stop hw first */
if (ctx->hw_mgr_intf->hw_stop) {
stop.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
&stop);
}
end:
return rc;
}
/*
 * cam_context_dump_pf_info_to_hw() - forward page-fault debug data to the
 * HW manager via the CAM_HW_MGR_CMD_DUMP_PF_INFO command.
 *
 * @ctx:       faulting context
 * @packet:    packet associated with the fault (may be inspected by HW mgr)
 * @iova:      faulting IO virtual address
 * @buf_info:  info about the buffer where the fault occurred
 * @mem_found: out flag set by the HW manager when the memory is located
 *
 * Return: 0 on success, -EINVAL/-EFAULT on bad context state.
 */
int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
	struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
	bool *mem_found)
{
	struct cam_hw_cmd_args hw_cmd_args;

	if (!ctx) {
		CAM_ERR(CAM_CTXT, "Invalid input params %pK ", ctx);
		return -EINVAL;
	}

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
			ctx->dev_name, ctx->ctx_id);
		return -EFAULT;
	}

	if (ctx->hw_mgr_intf->hw_cmd) {
		hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
		hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_DUMP_PF_INFO;
		hw_cmd_args.u.pf_args.pf_data.packet = packet;
		hw_cmd_args.u.pf_args.iova = iova;
		hw_cmd_args.u.pf_args.buf_info = buf_info;
		hw_cmd_args.u.pf_args.mem_found = mem_found;
		ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
			&hw_cmd_args);
	}

	return 0;
}

View file

@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */

/*
 * Context <-> HW-manager glue helpers shared by the camera context state
 * machines. NOTE(review): this header uses struct cam_context and the UMD
 * command structs without including their headers - presumably the .c
 * consumers include those first; confirm.
 */
#ifndef _CAM_CONTEXT_UTILS_H_
#define _CAM_CONTEXT_UTILS_H_

#include <linux/types.h>

/* Handle a buf-done event coming up from the HW layer. */
int cam_context_buf_done_from_hw(struct cam_context *ctx,
	void *done_event_data, uint32_t bubble_state);
/* Release a context's HW resources on CAM_RELEASE_DEV. */
int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd);
/* Map and prepare a config (packet) command for the HW manager. */
int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd);
/* Acquire HW resources for a context on CAM_ACQUIRE_DEV. */
int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd);
/* Start the device after validating session/device handles. */
int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd);
/* Flush all requests and stop the device. */
int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx);
/* Dispatch a flush command (ALL or single REQ) to the helpers below. */
int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd);
/* Flush every pending/active request on the context. */
int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx);
/* Flush a single request identified by cmd->req_id. */
int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd);
/* Forward page-fault debug info to the HW manager. */
int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
	struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
	bool *mem_found);

#endif /* _CAM_CONTEXT_UTILS_H_ */

View file

@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */
#ifndef _CAM_HW_H_
#define _CAM_HW_H_

/* NOTE(review): mutex/spinlock/completion types are assumed to arrive via
 * cam_soc_util.h's own includes - confirm.
 */
#include "cam_soc_util.h"

/*
 * This file declares Enums, Structures and APIs to be used as template
 * when writing any HW driver in the camera subsystem.
 */

/* Hardware state enum */
enum cam_hw_state {
	CAM_HW_STATE_POWER_DOWN,
	CAM_HW_STATE_POWER_UP,
};

/**
 * struct cam_hw_info - Common hardware information
 *
 * @hw_mutex:          Hardware mutex
 * @hw_lock:           Hardware spinlock
 * @hw_complete:       Hardware Completion
 * @open_count:        Count to track the HW enable from the client
 * @hw_state:          Hardware state
 * @soc_info:          Platform SOC properties for hardware
 * @node_info:         Private HW data related to nodes
 * @core_info:         Private HW data related to core logic
 *
 */
struct cam_hw_info {
	struct mutex            hw_mutex;
	spinlock_t              hw_lock;
	struct completion       hw_complete;
	uint32_t                open_count;
	enum cam_hw_state       hw_state;
	struct cam_hw_soc_info  soc_info;
	void                    *node_info;
	void                    *core_info;
};

#endif /* _CAM_HW_H_ */

View file

@ -0,0 +1,76 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */
#ifndef _CAM_HW_INTF_H_
#define _CAM_HW_INTF_H_

#include <linux/types.h>

/*
 * This file declares Constants, Enums, Structures and APIs to be used as
 * Interface between HW driver and HW Manager.
 */

/**
 * struct cam_hw_ops - Hardware layer interface functions
 *
 * @get_hw_caps:           Function pointer for get hw caps
 * @init:                  Function pointer for initialize hardware
 * @deinit:                Function pointer for deinitialize hardware
 * @reset:                 Function pointer for reset hardware
 * @reserve:               Function pointer for reserve hardware
 * @release:               Function pointer for release hardware
 * @start:                 Function pointer for start hardware
 * @stop:                  Function pointer for stop hardware
 * @read:                  Function pointer for read hardware registers
 * @write:                 Function pointer for Write hardware registers
 * @process_cmd:           Function pointer for additional hardware controls
 * @flush:                 Function pointer for flush requests
 *                         (was documented as @flush_cmd - fixed to match
 *                         the member name)
 *
 */
struct cam_hw_ops {
	int (*get_hw_caps)(void *hw_priv,
		void *get_hw_cap_args, uint32_t arg_size);
	int (*init)(void *hw_priv,
		void *init_hw_args, uint32_t arg_size);
	int (*deinit)(void *hw_priv,
		void *init_hw_args, uint32_t arg_size);
	int (*reset)(void *hw_priv,
		void *reset_core_args, uint32_t arg_size);
	int (*reserve)(void *hw_priv,
		void *reserve_args, uint32_t arg_size);
	int (*release)(void *hw_priv,
		void *release_args, uint32_t arg_size);
	int (*start)(void *hw_priv,
		void *start_args, uint32_t arg_size);
	int (*stop)(void *hw_priv,
		void *stop_args, uint32_t arg_size);
	int (*read)(void *hw_priv,
		void *read_args, uint32_t arg_size);
	int (*write)(void *hw_priv,
		void *write_args, uint32_t arg_size);
	int (*process_cmd)(void *hw_priv,
		uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
	int (*flush)(void *hw_priv,
		void *flush_args, uint32_t arg_size);
};

/**
 * struct cam_hw_intf - Common hardware node
 *
 * @hw_type:               Hardware type
 * @hw_idx:                Hardware ID
 * @hw_ops:                Hardware interface function table
 * @hw_priv:               Private hardware node pointer
 *
 */
struct cam_hw_intf {
	uint32_t                hw_type;
	uint32_t                hw_idx;
	struct cam_hw_ops       hw_ops;
	void                    *hw_priv;
};
#endif /* _CAM_HW_INTF_H_ */

View file

@ -0,0 +1,309 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */
#ifndef _CAM_HW_MGR_INTF_H_
#define _CAM_HW_MGR_INTF_H_

#include <linux/time.h>
#include <linux/types.h>

/*
 * This file declares Constants, Enums, Structures and APIs to be used as
 * Interface between HW Manager and Context.
 */

/* maximum context numbers */
#define CAM_CTX_MAX                         8

/* maximum buf done irqs */
#define CAM_NUM_OUT_PER_COMP_IRQ_MAX        12

/* hardware event callback function type */
typedef int (*cam_hw_event_cb_func)(void *context, uint32_t evt_id,
	void *evt_data);

/* hardware page fault callback function type */
typedef int (*cam_hw_pagefault_cb_func)(void *context, unsigned long iova,
	uint32_t buf_info);

/**
 * struct cam_hw_update_entry - Entry for hardware config
 *
 * @handle:                Memory handle for the configuration
 * @offset:                Memory offset
 * @len:                   Size of the configuration
 * @flags:                 Flags for the config entry(eg. DMI)
 * @addr:                  Address of hardware update entry
 *
 */
struct cam_hw_update_entry {
	int                handle;
	uint32_t           offset;
	uint32_t           len;
	uint32_t           flags;
	uintptr_t          addr;
};

/**
 * struct cam_hw_fence_map_entry - Entry for the resource to sync id map
 *
 * @resource_handle:       Resource port id for the buffer
 * @sync_id:               Sync id
 *
 */
struct cam_hw_fence_map_entry {
	uint32_t           resource_handle;
	int32_t            sync_id;
};

/**
 * struct cam_hw_done_event_data - Payload for hw done event
 *
 * @num_handles:           number of handles in the event
 * @resource_handle:       list of the resource handle
 * @timestamp:             time stamp
 * @request_id:            request identifier
 *
 */
struct cam_hw_done_event_data {
	uint32_t           num_handles;
	uint32_t           resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
	struct timeval     timestamp;
	uint64_t           request_id;
};

/**
 * struct cam_hw_acquire_args - Payload for acquire command
 *
 * @context_data:          Context data pointer for the callback function
 * @event_cb:              Callback function array
 * @num_acq:               Total number of acquire in the payload
 * @acquire_info_size:     Size of the acquire info payload
 * @acquire_info:          Acquired resource array pointer
 * @ctxt_to_hw_map:        HW context (returned)
 *
 */
struct cam_hw_acquire_args {
	void                        *context_data;
	cam_hw_event_cb_func         event_cb;
	uint32_t                     num_acq;
	uint32_t                     acquire_info_size;
	uintptr_t                    acquire_info;
	void                        *ctxt_to_hw_map;
};

/**
 * struct cam_hw_release_args - Payload for release command
 *
 * @ctxt_to_hw_map:        HW context from the acquire
 * @active_req:            Active request flag
 *
 */
struct cam_hw_release_args {
	void              *ctxt_to_hw_map;
	bool               active_req;
};

/**
 * struct cam_hw_start_args - Payload for start command
 *
 * @ctxt_to_hw_map:        HW context from the acquire
 * @num_hw_update_entries: Number of Hardware configuration
 * @hw_update_entries:     Hardware configuration list
 *
 */
struct cam_hw_start_args {
	void                        *ctxt_to_hw_map;
	uint32_t                     num_hw_update_entries;
	struct cam_hw_update_entry  *hw_update_entries;
};

/**
 * struct cam_hw_stop_args - Payload for stop command
 *
 * @ctxt_to_hw_map:        HW context from the acquire
 * @args:                  Arguments to pass for stop
 *
 */
struct cam_hw_stop_args {
	void              *ctxt_to_hw_map;
	void              *args;
};

/**
 * struct cam_hw_mgr_dump_pf_data - page fault debug data
 *
 * @packet:                pointer to packet
 */
struct cam_hw_mgr_dump_pf_data {
	void    *packet;
};

/**
 * struct cam_hw_prepare_update_args - Payload for prepare command
 *
 * @packet:                CSL packet from user mode driver
 * @ctxt_to_hw_map:        HW context from the acquire
 * @max_hw_update_entries: Maximum hardware update entries supported
 * @hw_update_entries:     Actual hardware update configuration (returned)
 * @num_hw_update_entries: Number of actual hardware update entries (returned)
 * @max_out_map_entries:   Maximum output fence mapping supported
 * @out_map_entries:       Actual output fence mapping list (returned)
 * @num_out_map_entries:   Number of actual output fence mapping (returned)
 * @max_in_map_entries:    Maximum input fence mapping supported
 * @in_map_entries:        Actual input fence mapping list (returned)
 * @num_in_map_entries:    Number of actual input fence mapping (returned)
 * @priv:                  Private pointer of hw update
 * @pf_data:               Debug data for page fault
 *
 */
struct cam_hw_prepare_update_args {
	struct cam_packet              *packet;
	void                           *ctxt_to_hw_map;
	uint32_t                        max_hw_update_entries;
	struct cam_hw_update_entry     *hw_update_entries;
	uint32_t                        num_hw_update_entries;
	uint32_t                        max_out_map_entries;
	struct cam_hw_fence_map_entry  *out_map_entries;
	uint32_t                        num_out_map_entries;
	uint32_t                        max_in_map_entries;
	struct cam_hw_fence_map_entry  *in_map_entries;
	uint32_t                        num_in_map_entries;
	void                           *priv;
	struct cam_hw_mgr_dump_pf_data *pf_data;
};

/**
 * struct cam_hw_config_args - Payload for config command
 *
 * @ctxt_to_hw_map:        HW context from the acquire
 * @num_hw_update_entries: Number of hardware update entries
 * @hw_update_entries:     Hardware update list
 * @out_map_entries:       Out map info
 * @num_out_map_entries:   Number of out map entries
 * @priv:                  Private pointer
 * @request_id:            Request ID
 * @init_packet:           Set when this config carries an init packet
 *
 */
struct cam_hw_config_args {
	void                           *ctxt_to_hw_map;
	uint32_t                        num_hw_update_entries;
	struct cam_hw_update_entry     *hw_update_entries;
	struct cam_hw_fence_map_entry  *out_map_entries;
	uint32_t                        num_out_map_entries;
	void                           *priv;
	uint64_t                        request_id;
	bool                            init_packet;
};

/**
 * struct cam_hw_flush_args - Flush arguments
 *
 * @ctxt_to_hw_map:        HW context from the acquire
 * @num_req_pending:       Num request to flush, valid when flush type is REQ
 * @flush_req_pending:     Request pending pointers to flush
 * @num_req_active:        Num request to flush, valid when flush type is REQ
 * @flush_req_active:      Request active pointers to flush
 * @flush_type:            The flush type
 *
 * NOTE(review): the two arrays are hard-coded to 20 slots; presumably this
 * matches the per-context request pool size - confirm. flush_type_t is
 * declared elsewhere.
 */
struct cam_hw_flush_args {
	void              *ctxt_to_hw_map;
	uint32_t           num_req_pending;
	void              *flush_req_pending[20];
	uint32_t           num_req_active;
	void              *flush_req_active[20];
	enum flush_type_t  flush_type;
};

/**
 * struct cam_hw_dump_pf_args - Payload for dump pf info command
 *
 * @pf_data:               Debug data for page fault
 * @iova:                  Page fault address
 * @buf_info:              Info about memory buffer where page
 *                         fault occurred
 * @mem_found:             If fault memory found in current
 *                         request
 *
 */
struct cam_hw_dump_pf_args {
	struct cam_hw_mgr_dump_pf_data  pf_data;
	unsigned long                   iova;
	uint32_t                        buf_info;
	bool                           *mem_found;
};

/* enum cam_hw_mgr_command - Hardware manager command type */
enum cam_hw_mgr_command {
	CAM_HW_MGR_CMD_INTERNAL,
	CAM_HW_MGR_CMD_DUMP_PF_INFO,
};

/**
 * struct cam_hw_cmd_args - Payload for hw manager command
 *
 * @ctxt_to_hw_map:        HW context from the acquire
 * @cmd_type:              HW command type
 * @internal_args:         Arguments for internal command
 * @pf_args:               Arguments for Dump PF info command
 *
 */
struct cam_hw_cmd_args {
	void               *ctxt_to_hw_map;
	uint32_t            cmd_type;
	union {
		void                      *internal_args;
		struct cam_hw_dump_pf_args pf_args;
	} u;
};

/**
 * cam_hw_mgr_intf - HW manager interface
 *
 * @hw_mgr_priv:           HW manager object
 * @hw_get_caps:           Function pointer for get hw caps
 *                         args = cam_query_cap_cmd
 * @hw_acquire:            Function pointer for acquire hw resources
 *                         args = cam_hw_acquire_args
 * @hw_release:            Function pointer for release hw device resource
 *                         args = cam_hw_release_args
 * @hw_start:              Function pointer for start hw devices
 *                         args = cam_hw_start_args
 * @hw_stop:               Function pointer for stop hw devices
 *                         args = cam_hw_stop_args
 * @hw_prepare_update:     Function pointer for prepare hw update for hw
 *                         devices args = cam_hw_prepare_update_args
 * @hw_config:             Function pointer for configure hw devices
 *                         args = cam_hw_config_args
 * @hw_read:               Function pointer for read hardware registers
 * @hw_write:              Function pointer for Write hardware registers
 * @hw_cmd:                Function pointer for any customized commands for
 *                         the hardware manager
 * @hw_open:               Function pointer for HW init
 * @hw_close:              Function pointer for HW deinit
 * @hw_flush:              Function pointer for HW flush
 *
 */
struct cam_hw_mgr_intf {
	void *hw_mgr_priv;

	int (*hw_get_caps)(void *hw_priv, void *hw_caps_args);
	int (*hw_acquire)(void *hw_priv, void *hw_acquire_args);
	int (*hw_release)(void *hw_priv, void *hw_release_args);
	int (*hw_start)(void *hw_priv, void *hw_start_args);
	int (*hw_stop)(void *hw_priv, void *hw_stop_args);
	int (*hw_prepare_update)(void *hw_priv, void *hw_prepare_update_args);
	int (*hw_config)(void *hw_priv, void *hw_config_args);
	int (*hw_read)(void *hw_priv, void *read_args);
	int (*hw_write)(void *hw_priv, void *write_args);
	int (*hw_cmd)(void *hw_priv, void *write_args);
	int (*hw_open)(void *hw_priv, void *fw_download_args);
	int (*hw_close)(void *hw_priv, void *hw_close_args);
	int (*hw_flush)(void *hw_priv, void *hw_flush_args);
};

#endif /* _CAM_HW_MGR_INTF_H_ */

View file

@ -0,0 +1,783 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/uaccess.h>
#include "cam_node.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
/*
 * Log the state of every context owned by @node - used as a debug aid when
 * context acquisition fails because the free list is empty. The list_empty()
 * values logged are 1 when the corresponding list is empty.
 */
static void cam_node_print_ctx_state(
	struct cam_node *node)
{
	int i;
	struct cam_context *ctx;

	CAM_INFO(CAM_CORE, "[%s] state=%d, ctx_size %d",
		node->name, node->state, node->ctx_size);

	mutex_lock(&node->list_mutex);
	for (i = 0; i < node->ctx_size; i++) {
		ctx = &node->ctx_list[i];
		/* ctx->lock keeps the per-context snapshot consistent */
		spin_lock(&ctx->lock);
		CAM_INFO(CAM_CORE,
			"[%s][%d] : state=%d, refcount=%d, active_req_list=%d, pending_req_list=%d, wait_req_list=%d, free_req_list=%d",
			ctx->dev_name ? ctx->dev_name : "null",
			i, ctx->state,
			atomic_read(&(ctx->refcount.refcount.refs)),
			list_empty(&ctx->active_req_list),
			list_empty(&ctx->pending_req_list),
			list_empty(&ctx->wait_req_list),
			list_empty(&ctx->free_req_list));
		spin_unlock(&ctx->lock);
	}
	mutex_unlock(&node->list_mutex);
}
/*
 * Pop one context off the node's free list (or NULL if none are available)
 * and start a fresh refcount on it.
 */
static struct cam_context *cam_node_get_ctxt_from_free_list(
	struct cam_node *node)
{
	struct cam_context *free_ctx;

	mutex_lock(&node->list_mutex);
	free_ctx = list_first_entry_or_null(&node->free_ctx_list,
		struct cam_context, list);
	if (free_ctx)
		list_del_init(&free_ctx->list);
	mutex_unlock(&node->list_mutex);

	if (free_ctx)
		kref_init(&free_ctx->refcount);

	return free_ctx;
}
/*
 * kref release handler: when the last reference to a context is dropped it
 * is recycled onto its owning node's free list (the memory is not freed).
 */
void cam_node_put_ctxt_to_free_list(struct kref *ref)
{
	struct cam_context *ctx = container_of(ref, struct cam_context,
		refcount);
	struct cam_node *owner = ctx->node;

	mutex_lock(&owner->list_mutex);
	list_add_tail(&ctx->list, &owner->free_ctx_list);
	mutex_unlock(&owner->list_mutex);
}
/*
 * CAM_QUERY_CAP handler: forward the query to the HW manager if it supports
 * capability reporting; -EFAULT otherwise.
 */
static int __cam_node_handle_query_cap(struct cam_node *node,
	struct cam_query_cap_cmd *query)
{
	if (!query) {
		CAM_ERR(CAM_CORE, "Invalid params");
		return -EINVAL;
	}

	if (!node->hw_mgr_intf.hw_get_caps)
		return -EFAULT;

	return node->hw_mgr_intf.hw_get_caps(node->hw_mgr_intf.hw_mgr_priv,
		query);
}
/*
 * CAM_ACQUIRE_DEV handler: grab a free context from the node and let it
 * acquire device resources. On failure the context reference is dropped,
 * returning it to the free list.
 */
static int __cam_node_handle_acquire_dev(struct cam_node *node,
	struct cam_acquire_dev_cmd *acquire)
{
	struct cam_context *ctx;
	int rc;

	if (!acquire)
		return -EINVAL;

	ctx = cam_node_get_ctxt_from_free_list(node);
	if (!ctx) {
		CAM_ERR(CAM_CORE, "No free ctx in free list node %s",
			node->name);
		cam_node_print_ctx_state(node);
		return -ENOMEM;
	}

	rc = cam_context_handle_acquire_dev(ctx, acquire);
	if (rc) {
		CAM_ERR(CAM_CORE, "Acquire device failed for node %s",
			node->name);
		cam_context_putref(ctx);
		return rc;
	}

	CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d",
		node->name, ctx->ctx_id);

	return 0;
}
/*
 * CAM_ACQUIRE_HW (v1) handler: resolve the already-acquired context from the
 * device handle and let it acquire its HW resources.
 */
static int __cam_node_handle_acquire_hw_v1(struct cam_node *node,
	struct cam_acquire_hw_cmd_v1 *acquire)
{
	struct cam_context *acq_ctx;
	int rc;

	if (!acquire)
		return -EINVAL;

	if (acquire->dev_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid device handle for context");
		return -EINVAL;
	}

	if (acquire->session_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid session handle for context");
		return -EINVAL;
	}

	acq_ctx = (struct cam_context *)
		cam_get_device_priv(acquire->dev_handle);
	if (!acq_ctx) {
		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
			acquire->dev_handle);
		return -EINVAL;
	}

	rc = cam_context_handle_acquire_hw(acq_ctx, acquire);
	if (rc) {
		CAM_ERR(CAM_CORE, "Acquire device failed for node %s",
			node->name);
		return rc;
	}

	CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d",
		node->name, acq_ctx->ctx_id);

	return 0;
}
/*
 * CAM_START_DEV handler: validate the handles, resolve the context and start
 * the device through its state machine.
 */
static int __cam_node_handle_start_dev(struct cam_node *node,
	struct cam_start_stop_dev_cmd *start)
{
	struct cam_context *start_ctx;
	int rc = -EINVAL;

	if (!start)
		goto out;

	if (start->dev_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid device handle for context");
		goto out;
	}

	if (start->session_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid session handle for context");
		goto out;
	}

	start_ctx = (struct cam_context *)
		cam_get_device_priv(start->dev_handle);
	if (!start_ctx) {
		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
			start->dev_handle);
		goto out;
	}

	rc = cam_context_handle_start_dev(start_ctx, start);
	if (rc)
		CAM_ERR(CAM_CORE, "Start failure for node %s", node->name);
out:
	return rc;
}
/*
 * CAM_STOP_DEV handler: validate the handles, resolve the context and stop
 * the device through its state machine.
 */
static int __cam_node_handle_stop_dev(struct cam_node *node,
	struct cam_start_stop_dev_cmd *stop)
{
	struct cam_context *stop_ctx;
	int rc = -EINVAL;

	if (!stop)
		goto out;

	if (stop->dev_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid device handle for context");
		goto out;
	}

	if (stop->session_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid session handle for context");
		goto out;
	}

	stop_ctx = (struct cam_context *)
		cam_get_device_priv(stop->dev_handle);
	if (!stop_ctx) {
		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
			stop->dev_handle);
		goto out;
	}

	rc = cam_context_handle_stop_dev(stop_ctx, stop);
	if (rc)
		CAM_ERR(CAM_CORE, "Stop failure for node %s", node->name);
out:
	return rc;
}
/*
 * CAM_CONFIG_DEV handler: validate the handles, resolve the context and hand
 * the config packet to its state machine.
 */
static int __cam_node_handle_config_dev(struct cam_node *node,
	struct cam_config_dev_cmd *config)
{
	struct cam_context *cfg_ctx;
	int rc = -EINVAL;

	if (!config)
		goto out;

	if (config->dev_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid device handle for context");
		goto out;
	}

	if (config->session_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid session handle for context");
		goto out;
	}

	cfg_ctx = (struct cam_context *)
		cam_get_device_priv(config->dev_handle);
	if (!cfg_ctx) {
		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
			config->dev_handle);
		goto out;
	}

	rc = cam_context_handle_config_dev(cfg_ctx, config);
	if (rc)
		CAM_ERR(CAM_CORE, "Config failure for node %s", node->name);
out:
	return rc;
}
/*
 * CAM_FLUSH_REQ handler: validate the handles, resolve the context and
 * forward the flush command to its state machine.
 */
static int __cam_node_handle_flush_dev(struct cam_node *node,
	struct cam_flush_dev_cmd *flush)
{
	struct cam_context *flush_ctx;
	int rc = -EINVAL;

	if (!flush)
		goto out;

	if (flush->dev_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid device handle for context");
		goto out;
	}

	if (flush->session_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid session handle for context");
		goto out;
	}

	flush_ctx = (struct cam_context *)
		cam_get_device_priv(flush->dev_handle);
	if (!flush_ctx) {
		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
			flush->dev_handle);
		goto out;
	}

	rc = cam_context_handle_flush_dev(flush_ctx, flush);
	if (rc)
		CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);
out:
	return rc;
}
/*
 * CAM_RELEASE_DEV handler: release the context's resources (if its state
 * allows), drop the node's reference to it, and destroy the device handle.
 */
static int __cam_node_handle_release_dev(struct cam_node *node,
	struct cam_release_dev_cmd *release)
{
	int rc = 0;
	struct cam_context *ctx = NULL;

	if (!release)
		return -EINVAL;

	if (release->dev_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid device handle for context");
		return -EINVAL;
	}

	if (release->session_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid session handle for context");
		return -EINVAL;
	}

	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
	if (!ctx) {
		CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s",
			release->dev_handle, node->name);
		return -EINVAL;
	}

	/* Only a context past UNINIT has resources to release and a
	 * reference to drop; otherwise skip straight to destroying the
	 * device handle.
	 */
	if (ctx->state > CAM_CTX_UNINIT && ctx->state < CAM_CTX_STATE_MAX) {
		rc = cam_context_handle_release_dev(ctx, release);
		if (rc)
			CAM_ERR(CAM_CORE, "context release failed for node %s",
				node->name);
	} else {
		CAM_WARN(CAM_CORE,
			"node %s context id %u state %d invalid to release hdl",
			node->name, ctx->ctx_id, ctx->state);
		goto destroy_dev_hdl;
	}

	/* May recycle ctx to the node free list via the kref release. */
	cam_context_putref(ctx);

destroy_dev_hdl:
	rc = cam_destroy_device_hdl(release->dev_handle);
	if (rc)
		CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s",
			node->name);
	else
		ctx->dev_hdl = -1;

	/* NOTE(review): ctx is still dereferenced after cam_context_putref()
	 * above. This appears safe only because the release handler recycles
	 * the context to the free list instead of freeing it - confirm no
	 * concurrent re-acquire can race with this tail access.
	 */
	CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d",
		node->name, ctx->ctx_id,
		atomic_read(&(ctx->refcount.refcount.refs)));

	return rc;
}
/*
 * CAM_RELEASE_HW (v1) handler: resolve the context from the device handle
 * and release its HW resources (the context itself stays acquired).
 */
static int __cam_node_handle_release_hw_v1(struct cam_node *node,
	struct cam_release_hw_cmd_v1 *release)
{
	struct cam_context *rel_ctx;
	int rc;

	if (!release)
		return -EINVAL;

	if (release->dev_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid device handle for context");
		return -EINVAL;
	}

	if (release->session_handle <= 0) {
		CAM_ERR(CAM_CORE, "Invalid session handle for context");
		return -EINVAL;
	}

	rel_ctx = (struct cam_context *)
		cam_get_device_priv(release->dev_handle);
	if (!rel_ctx) {
		CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s",
			release->dev_handle, node->name);
		return -EINVAL;
	}

	rc = cam_context_handle_release_hw(rel_ctx, release);
	if (rc)
		CAM_ERR(CAM_CORE, "context release failed node %s", node->name);

	CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d",
		node->name, rel_ctx->ctx_id,
		atomic_read(&(rel_ctx->refcount.refcount.refs)));

	return rc;
}
/*
 * CRM callback: report device info for the context behind @info->dev_hdl.
 */
static int __cam_node_crm_get_dev_info(struct cam_req_mgr_device_info *info)
{
	struct cam_context *ctx;

	if (!info)
		return -EINVAL;

	ctx = (struct cam_context *) cam_get_device_priv(info->dev_hdl);
	if (ctx)
		return cam_context_handle_crm_get_dev_info(ctx, info);

	CAM_ERR(CAM_CORE, "Can not get context for handle %d",
		info->dev_hdl);
	return -EINVAL;
}
/*
 * CRM callback: link or unlink the context behind @setup->dev_hdl depending
 * on @setup->link_enable.
 */
static int __cam_node_crm_link_setup(
	struct cam_req_mgr_core_dev_link_setup *setup)
{
	struct cam_context *ctx;

	if (!setup)
		return -EINVAL;

	ctx = (struct cam_context *) cam_get_device_priv(setup->dev_hdl);
	if (!ctx) {
		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
			setup->dev_hdl);
		return -EINVAL;
	}

	return setup->link_enable ?
		cam_context_handle_crm_link(ctx, setup) :
		cam_context_handle_crm_unlink(ctx, setup);
}
/*
 * CRM callback: apply a settled request on the context behind
 * @apply->dev_hdl.
 */
static int __cam_node_crm_apply_req(struct cam_req_mgr_apply_request *apply)
{
	struct cam_context *ctx;

	if (!apply)
		return -EINVAL;

	ctx = (struct cam_context *) cam_get_device_priv(apply->dev_hdl);
	if (!ctx) {
		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
			apply->dev_hdl);
		return -EINVAL;
	}

	/* trace before handing the request to the context state machine */
	trace_cam_apply_req("Node", apply->request_id);

	return cam_context_handle_crm_apply_req(ctx, apply);
}
/*
 * CRM callback: flush request(s) on the context behind @flush->dev_hdl.
 */
static int __cam_node_crm_flush_req(struct cam_req_mgr_flush_request *flush)
{
	struct cam_context *ctx;

	if (!flush) {
		CAM_ERR(CAM_CORE, "Invalid flush request payload");
		return -EINVAL;
	}

	ctx = (struct cam_context *) cam_get_device_priv(flush->dev_hdl);
	if (ctx)
		return cam_context_handle_crm_flush_req(ctx, flush);

	CAM_ERR(CAM_CORE, "Can not get context for handle %d",
		flush->dev_hdl);
	return -EINVAL;
}
/*
 * CRM callback: deliver a link event to the context behind
 * @evt_data->dev_hdl.
 */
static int __cam_node_crm_process_evt(
	struct cam_req_mgr_link_evt_data *evt_data)
{
	struct cam_context *ctx;

	if (!evt_data) {
		CAM_ERR(CAM_CORE, "Invalid process event request payload");
		return -EINVAL;
	}

	ctx = (struct cam_context *) cam_get_device_priv(evt_data->dev_hdl);
	if (ctx)
		return cam_context_handle_crm_process_evt(ctx, evt_data);

	CAM_ERR(CAM_CORE, "Can not get context for handle %d",
		evt_data->dev_hdl);
	return -EINVAL;
}
int cam_node_deinit(struct cam_node *node)
{
if (node)
memset(node, 0, sizeof(*node));
CAM_DBG(CAM_CORE, "deinit complete");
return 0;
}
/*
 * cam_node_shutdown() - force-release every context on @node that still has
 * a live device handle, then close the HW manager.
 *
 * Fix over original: a failing cam_context_shutdown() was silently
 * discarded; it is now logged. Shutdown still proceeds best-effort over all
 * contexts and the function still returns 0 (callers do not unwind here).
 *
 * Return: 0, or -EINVAL if @node is NULL.
 */
int cam_node_shutdown(struct cam_node *node)
{
	int i = 0;
	int rc = 0;

	if (!node)
		return -EINVAL;

	for (i = 0; i < node->ctx_size; i++) {
		if (node->ctx_list[i].dev_hdl > 0) {
			CAM_DBG(CAM_CORE,
				"Node [%s] invoking shutdown on context [%d]",
				node->name, i);
			rc = cam_context_shutdown(&(node->ctx_list[i]));
			if (rc)
				CAM_WARN(CAM_CORE,
					"Node [%s] context [%d] shutdown failed rc %d",
					node->name, i, rc);
		}
	}

	if (node->hw_mgr_intf.hw_close)
		node->hw_mgr_intf.hw_close(node->hw_mgr_intf.hw_mgr_priv,
			NULL);

	return 0;
}
/*
 * cam_node_init() - initialize a cam_node with a HW manager interface and a
 * caller-provided array of pre-initialized contexts.
 *
 * Every context must already have its state machine set up; each one is
 * placed on the node's free list and back-pointed at the node.
 *
 * Fixes over original:
 * - dropped the "sizeof(node->hw_mgr_intf) != sizeof(*hw_mgr_intf)" guard:
 *   both operands have the same type, so the comparison could never be
 *   true (dead code);
 * - an uninitialized context now fails with -EINVAL instead of a bare -1.
 *
 * Return: 0 on success, negative errno on failure.
 */
int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
	struct cam_context *ctx_list, uint32_t ctx_size, char *name)
{
	int rc = 0;
	int i;

	if (!node || !hw_mgr_intf)
		return -EINVAL;

	memset(node, 0, sizeof(*node));

	strlcpy(node->name, name, sizeof(node->name));

	memcpy(&node->hw_mgr_intf, hw_mgr_intf, sizeof(node->hw_mgr_intf));
	node->crm_node_intf.apply_req = __cam_node_crm_apply_req;
	node->crm_node_intf.get_dev_info = __cam_node_crm_get_dev_info;
	node->crm_node_intf.link_setup = __cam_node_crm_link_setup;
	node->crm_node_intf.flush_req = __cam_node_crm_flush_req;
	node->crm_node_intf.process_evt = __cam_node_crm_process_evt;

	mutex_init(&node->list_mutex);
	INIT_LIST_HEAD(&node->free_ctx_list);
	node->ctx_list = ctx_list;
	node->ctx_size = ctx_size;
	for (i = 0; i < ctx_size; i++) {
		if (!ctx_list[i].state_machine) {
			CAM_ERR(CAM_CORE,
				"camera context %d is not initialized", i);
			rc = -EINVAL;
			goto err;
		}
		INIT_LIST_HEAD(&ctx_list[i].list);
		list_add_tail(&ctx_list[i].list, &node->free_ctx_list);
		ctx_list[i].node = node;
	}

	node->state = CAM_NODE_STATE_INIT;
err:
	CAM_DBG(CAM_CORE, "Exit. (rc = %d)", rc);
	return rc;
}
/**
 * cam_node_handle_ioctl() - Dispatch a CAM_* opcode to this node's handlers.
 * @node: Node owning the contexts the command operates on. Not validated
 *        here; the per-opcode helpers are expected to check it.
 * @cmd:  User-supplied control block; cmd->handle is a u64 holding a
 *        user-space pointer to the opcode-specific payload.
 *
 * Every case copies the payload in from user space, invokes the matching
 * __cam_node_handle_*() helper, and - for opcodes that return data
 * (query/acquire) - copies the payload back out.
 *
 * Return: 0 on success, negative errno on failure.
 */
int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
{
	int rc = 0;

	if (!cmd)
		return -EINVAL;

	CAM_DBG(CAM_CORE, "handle cmd %d", cmd->op_code);

	switch (cmd->op_code) {
	case CAM_QUERY_CAP: {
		struct cam_query_cap_cmd query;

		if (copy_from_user(&query, u64_to_user_ptr(cmd->handle),
			sizeof(query))) {
			rc = -EFAULT;
			break;
		}
		rc = __cam_node_handle_query_cap(node, &query);
		if (rc) {
			CAM_ERR(CAM_CORE, "querycap is failed(rc = %d)",
				rc);
			break;
		}
		/* capabilities are returned in the same struct */
		if (copy_to_user(u64_to_user_ptr(cmd->handle), &query,
			sizeof(query)))
			rc = -EFAULT;
		break;
	}
	case CAM_ACQUIRE_DEV: {
		struct cam_acquire_dev_cmd acquire;

		if (copy_from_user(&acquire, u64_to_user_ptr(cmd->handle),
			sizeof(acquire))) {
			rc = -EFAULT;
			break;
		}
		rc = __cam_node_handle_acquire_dev(node, &acquire);
		if (rc) {
			CAM_ERR(CAM_CORE, "acquire device failed(rc = %d)",
				rc);
			break;
		}
		/* copy back so the caller receives the acquired dev handle */
		if (copy_to_user(u64_to_user_ptr(cmd->handle), &acquire,
			sizeof(acquire)))
			rc = -EFAULT;
		break;
	}
	case CAM_ACQUIRE_HW: {
		uint32_t api_version;
		void *acquire_ptr = NULL;
		size_t acquire_size;

		/*
		 * NOTE(review): this case converts cmd->handle with a plain
		 * (void __user *) cast while the other cases use
		 * u64_to_user_ptr(); consider unifying.
		 *
		 * The payload is versioned: the first u32 selects the
		 * struct layout, so it is read first and the full struct
		 * is copied afterwards.
		 */
		if (copy_from_user(&api_version, (void __user *)cmd->handle,
			sizeof(api_version))) {
			rc = -EFAULT;
			break;
		}
		if (api_version == 1) {
			acquire_size = sizeof(struct cam_acquire_hw_cmd_v1);
		} else {
			CAM_ERR(CAM_CORE, "Unsupported api version %d",
				api_version);
			rc = -EINVAL;
			break;
		}
		acquire_ptr = kzalloc(acquire_size, GFP_KERNEL);
		if (!acquire_ptr) {
			CAM_ERR(CAM_CORE, "No memory for acquire HW");
			rc = -ENOMEM;
			break;
		}
		if (copy_from_user(acquire_ptr, (void __user *)cmd->handle,
			acquire_size)) {
			rc = -EFAULT;
			goto acquire_kfree;
		}
		if (api_version == 1) {
			rc = __cam_node_handle_acquire_hw_v1(node, acquire_ptr);
			if (rc) {
				CAM_ERR(CAM_CORE,
					"acquire device failed(rc = %d)", rc);
				goto acquire_kfree;
			}
		}
		if (copy_to_user((void __user *)cmd->handle, acquire_ptr,
			acquire_size))
			rc = -EFAULT;
acquire_kfree:
		kfree(acquire_ptr);
		break;
	}
	case CAM_START_DEV: {
		struct cam_start_stop_dev_cmd start;

		if (copy_from_user(&start, u64_to_user_ptr(cmd->handle),
			sizeof(start)))
			rc = -EFAULT;
		else {
			rc = __cam_node_handle_start_dev(node, &start);
			if (rc)
				CAM_ERR(CAM_CORE,
					"start device failed(rc = %d)", rc);
		}
		break;
	}
	case CAM_STOP_DEV: {
		struct cam_start_stop_dev_cmd stop;

		if (copy_from_user(&stop, u64_to_user_ptr(cmd->handle),
			sizeof(stop)))
			rc = -EFAULT;
		else {
			rc = __cam_node_handle_stop_dev(node, &stop);
			if (rc)
				CAM_ERR(CAM_CORE,
					"stop device failed(rc = %d)", rc);
		}
		break;
	}
	case CAM_CONFIG_DEV: {
		struct cam_config_dev_cmd config;

		if (copy_from_user(&config, u64_to_user_ptr(cmd->handle),
			sizeof(config)))
			rc = -EFAULT;
		else {
			rc = __cam_node_handle_config_dev(node, &config);
			if (rc)
				CAM_ERR(CAM_CORE,
					"config device failed(rc = %d)", rc);
		}
		break;
	}
	case CAM_RELEASE_DEV: {
		struct cam_release_dev_cmd release;

		if (copy_from_user(&release, u64_to_user_ptr(cmd->handle),
			sizeof(release)))
			rc = -EFAULT;
		else {
			rc = __cam_node_handle_release_dev(node, &release);
			if (rc)
				CAM_ERR(CAM_CORE,
					"release device failed(rc = %d)", rc);
		}
		break;
	}
	case CAM_RELEASE_HW: {
		uint32_t api_version;
		size_t release_size;
		void *release_ptr = NULL;

		/* versioned payload; mirrors the CAM_ACQUIRE_HW flow above */
		if (copy_from_user(&api_version, (void __user *)cmd->handle,
			sizeof(api_version))) {
			rc = -EFAULT;
			break;
		}
		if (api_version == 1) {
			release_size = sizeof(struct cam_release_hw_cmd_v1);
		} else {
			CAM_ERR(CAM_CORE, "Unsupported api version %d",
				api_version);
			rc = -EINVAL;
			break;
		}
		release_ptr = kzalloc(release_size, GFP_KERNEL);
		if (!release_ptr) {
			CAM_ERR(CAM_CORE, "No memory for release HW");
			rc = -ENOMEM;
			break;
		}
		if (copy_from_user(release_ptr, (void __user *)cmd->handle,
			release_size)) {
			rc = -EFAULT;
			goto release_kfree;
		}
		if (api_version == 1) {
			rc = __cam_node_handle_release_hw_v1(node, release_ptr);
			if (rc)
				CAM_ERR(CAM_CORE,
					"release device failed(rc = %d)", rc);
		}
release_kfree:
		kfree(release_ptr);
		break;
	}
	case CAM_FLUSH_REQ: {
		struct cam_flush_dev_cmd flush;

		if (copy_from_user(&flush, u64_to_user_ptr(cmd->handle),
			sizeof(flush)))
			rc = -EFAULT;
		else {
			rc = __cam_node_handle_flush_dev(node, &flush);
			if (rc)
				CAM_ERR(CAM_CORE,
					"flush device failed(rc = %d)", rc);
		}
		break;
	}
	default:
		CAM_ERR(CAM_CORE, "Unknown op code %d", cmd->op_code);
		rc = -EINVAL;
	}
	return rc;
}

View file

@ -0,0 +1,104 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */
#ifndef _CAM_NODE_H_
#define _CAM_NODE_H_
#include <linux/kref.h>
#include "cam_context.h"
#include "cam_hw_mgr_intf.h"
#include "cam_req_mgr_interface.h"
/* Maximum length (bytes) of a node name, including the terminating NUL */
#define CAM_NODE_NAME_LENGTH_MAX 256
/* Node lifecycle states */
#define CAM_NODE_STATE_UNINIT 0
#define CAM_NODE_STATE_INIT 1
/**
 * struct cam_node - Singleton Node for camera HW devices
 *
 * @name: Name for struct cam_node
 * @state: Node state:
 * 0 = uninitialized, 1 = initialized
 * @list_mutex: Mutex for the context pool
 * @free_ctx_list: Free context pool list
 * @ctx_list: Context list
 * @ctx_size: Number of contexts in @ctx_list
 * @hw_mgr_intf: Interface for cam_node to HW
 * @crm_node_intf: Interface for the CRM to cam_node
 *
 */
struct cam_node {
	char name[CAM_NODE_NAME_LENGTH_MAX];
	uint32_t state;

	/* context pool */
	struct mutex list_mutex;
	struct list_head free_ctx_list;
	struct cam_context *ctx_list;
	uint32_t ctx_size;

	/* interfaces */
	struct cam_hw_mgr_intf hw_mgr_intf;
	struct cam_req_mgr_kmd_ops crm_node_intf;
};
/**
 * cam_node_handle_ioctl()
 *
 * @brief: Handle ioctl commands
 *
 * @node: Node handle
 * @cmd: IOCTL command
 *
 */
int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd);
/**
 * cam_node_deinit()
 *
 * @brief: Deinitialization function for the Node interface
 *
 * @node: Node handle
 *
 */
int cam_node_deinit(struct cam_node *node);
/**
 * cam_node_shutdown()
 *
 * @brief: Shuts down/closes the cam node.
 *
 * @node: Cam_node pointer
 *
 */
int cam_node_shutdown(struct cam_node *node);
/**
 * cam_node_init()
 *
 * @brief: Initialization function for the Node interface.
 *
 * @node: Cam_node pointer
 * @hw_mgr_intf: HW manager interface blob
 * @ctx_list: List of cam_contexts to be added
 * @ctx_size: Number of contexts in @ctx_list
 * @name: Name for the node
 *
 */
int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
	struct cam_context *ctx_list, uint32_t ctx_size, char *name);
/**
 * cam_node_put_ctxt_to_free_list()
 *
 * @brief: Put context in node free list.
 *
 * @ref: Context's kref object
 *
 */
void cam_node_put_ctxt_to_free_list(struct kref *ref);
#endif /* _CAM_NODE_H_ */

View file

@ -0,0 +1,154 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include "cam_subdev.h"
#include "cam_node.h"
#include "cam_debug_util.h"
/*
 * cam_subdev_subscribe_event() - register @fh for V4L2 events on this
 * subdev. Thin wrapper: queue depth is CAM_SUBDEVICE_EVENT_MAX, default
 * event merge/replace ops are used.
 */
static int cam_subdev_subscribe_event(struct v4l2_subdev *sd,
	struct v4l2_fh *fh,
	struct v4l2_event_subscription *sub)
{
	return v4l2_event_subscribe(fh, sub, CAM_SUBDEVICE_EVENT_MAX, NULL);
}
/*
 * cam_subdev_unsubscribe_event() - drop @fh's V4L2 event subscription.
 * Thin wrapper around the V4L2 core helper.
 */
static int cam_subdev_unsubscribe_event(struct v4l2_subdev *sd,
	struct v4l2_fh *fh,
	struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
/*
 * cam_subdev_ioctl() - route a subdev ioctl to the owning cam_node.
 * Only VIDIOC_CAM_CONTROL is supported; the node must be initialized.
 */
static long cam_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd,
	void *arg)
{
	struct cam_node *node = v4l2_get_subdevdata(sd);

	if (!node || (node->state == CAM_NODE_STATE_UNINIT))
		return -EINVAL;

	switch (cmd) {
	case VIDIOC_CAM_CONTROL:
		return cam_node_handle_ioctl(node, (struct cam_control *)arg);
	default:
		CAM_ERR(CAM_CORE, "Invalid command %d for %s", cmd,
			node->name);
		return -EINVAL;
	}
}
#ifdef CONFIG_COMPAT
/*
 * cam_subdev_compat_ioctl() - 32-bit compat shim. Copies the cam_control
 * block in from user space, forwards it to cam_subdev_ioctl(), and copies
 * the (possibly updated) block back on success.
 *
 * Fix: rc is now 'long' to match cam_subdev_ioctl()'s return type instead
 * of silently narrowing it through an int.
 */
static long cam_subdev_compat_ioctl(struct v4l2_subdev *sd,
	unsigned int cmd, unsigned long arg)
{
	struct cam_control cmd_data;
	long rc;

	if (copy_from_user(&cmd_data, (void __user *)arg,
		sizeof(cmd_data))) {
		CAM_ERR(CAM_CORE, "Failed to copy from user_ptr=%pK size=%zu",
			(void __user *)arg, sizeof(cmd_data));
		return -EFAULT;
	}
	rc = cam_subdev_ioctl(sd, cmd, &cmd_data);
	if (!rc) {
		if (copy_to_user((void __user *)arg, &cmd_data,
			sizeof(cmd_data))) {
			CAM_ERR(CAM_CORE,
				"Failed to copy to user_ptr=%pK size=%zu",
				(void __user *)arg, sizeof(cmd_data));
			rc = -EFAULT;
		}
	}
	return rc;
}
#endif
/* Core subdev ops: ioctl dispatch plus V4L2 event (un)subscription. */
const struct v4l2_subdev_core_ops cam_subdev_core_ops = {
	.ioctl = cam_subdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = cam_subdev_compat_ioctl,
#endif
	.subscribe_event = cam_subdev_subscribe_event,
	.unsubscribe_event = cam_subdev_unsubscribe_event,
};
/* Top-level ops table handed to the V4L2 framework at registration. */
static const struct v4l2_subdev_ops cam_subdev_ops = {
	.core = &cam_subdev_core_ops,
};
int cam_subdev_remove(struct cam_subdev *sd)
{
if (!sd)
return -EINVAL;
cam_unregister_subdev(sd);
cam_node_deinit((struct cam_node *)sd->token);
kfree(sd->token);
return 0;
}
/*
 * cam_subdev_probe() - allocate a cam_node, describe the subdev and
 * register it with the V4L2 framework.
 *
 * Fix: on registration failure the node is freed, so sd->token is reset
 * to NULL instead of dangling into freed memory.
 */
int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev,
	char *name, uint32_t dev_type)
{
	int rc;
	struct cam_node *node = NULL;

	if (!sd || !pdev || !name)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* Setup camera v4l2 subdevice */
	sd->pdev = pdev;
	sd->name = name;
	sd->ops = &cam_subdev_ops;
	sd->token = node;
	sd->sd_flags =
		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	sd->ent_function = dev_type;

	rc = cam_register_subdev(sd);
	if (rc) {
		CAM_ERR(CAM_CORE, "cam_register_subdev() failed for dev: %s",
			sd->name);
		goto err;
	}
	platform_set_drvdata(sd->pdev, sd);
	return rc;
err:
	sd->token = NULL;
	kfree(node);
	return rc;
}

View file

@ -0,0 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
# Build rules for the CPAS (Camera Platform Abstraction Service) driver:
# header search paths for sibling camera modules, the HW-version specific
# sub-drivers (cpas_top / camss_top) and the common soc/intf/hw core.
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/cpas_top
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/camss_top
obj-$(CONFIG_SPECTRA_CAMERA) += cpas_top/
obj-$(CONFIG_SPECTRA_CAMERA) += camss_top/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas_soc.o cam_cpas_intf.o cam_cpas_hw.o

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,201 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CPAS_HW_H_
#define _CAM_CPAS_HW_H_
#include "cam_cpas_api.h"
#include "cam_cpas_hw_intf.h"
#include "cam_common_util.h"
/* Upper bound on concurrently registered CPAS clients */
#define CAM_CPAS_MAX_CLIENTS 30
#define CAM_CPAS_INFLIGHT_WORKS 5
/* Floor bandwidth votes (bytes/sec) applied while a client is started */
#define CAM_CPAS_AXI_MIN_MNOC_AB_BW (2048 * 1024)
#define CAM_CPAS_AXI_MIN_MNOC_IB_BW (2048 * 1024)
#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000UL)
/*
 * Handle <-> index mapping is currently the identity; keep going through
 * these macros so the encoding can change in one place.
 * Fix: all macro arguments are now parenthesized so expressions such as
 * CAM_CPAS_CLIENT_VALID(a ? b : c) expand correctly.
 */
#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
#define CAM_CPAS_CLIENT_VALID(indx) \
	(((indx) >= 0) && ((indx) < CAM_CPAS_MAX_CLIENTS))
#define CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx) \
	((CAM_CPAS_CLIENT_VALID(indx)) && \
	((cpas_core)->cpas_client[(indx)]))
#define CAM_CPAS_CLIENT_STARTED(cpas_core, indx) \
	((CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)) && \
	((cpas_core)->cpas_client[(indx)]->started))
/**
 * enum cam_cpas_access_type - Enum for Register access type
 */
enum cam_cpas_access_type {
	CAM_REG_TYPE_READ,
	CAM_REG_TYPE_WRITE,
	CAM_REG_TYPE_READ_WRITE,
};
/**
 * struct cam_cpas_internal_ops - CPAS Hardware layer internal ops
 *
 * @get_hw_info: Function pointer for get hw info
 * @init_hw_version: Function pointer for hw init based on version
 * @handle_irq: Function pointer for irq handling
 * @setup_regbase: Function pointer for setting up regbase indices
 * @power_on: Function pointer for hw core specific power on settings
 * @power_off: Function pointer for hw core specific power off settings
 *
 */
struct cam_cpas_internal_ops {
	int (*get_hw_info)(struct cam_hw_info *cpas_hw,
		struct cam_cpas_hw_caps *hw_caps);
	int (*init_hw_version)(struct cam_hw_info *cpas_hw,
		struct cam_cpas_hw_caps *hw_caps);
	irqreturn_t (*handle_irq)(int irq_num, void *data);
	int (*setup_regbase)(struct cam_hw_soc_info *soc_info,
		int32_t regbase_index[], int32_t num_reg_map);
	int (*power_on)(struct cam_hw_info *cpas_hw);
	int (*power_off)(struct cam_hw_info *cpas_hw);
};
/**
 * struct cam_cpas_reg : CPAS register info
 *
 * @enable: Whether this reg info need to be enabled
 * @access_type: Register access type
 * @masked_value: Whether this register write/read is based on mask, shift
 * @offset: Register offset
 * @mask: Mask for this register value
 * @shift: Shift for this register value
 * @value: Register value
 *
 */
struct cam_cpas_reg {
	bool enable;
	enum cam_cpas_access_type access_type;
	bool masked_value;
	uint32_t offset;
	uint32_t mask;
	uint32_t shift;
	uint32_t value;
};
/**
 * struct cam_cpas_client : CPAS Client structure info
 *
 * @data: Client register params
 * @started: Whether client has streamed on
 * @ahb_level: Determined/Applied ahb level for the client
 * @axi_vote: Determined/Applied axi vote for the client
 * @axi_port: Client's parent axi port
 * @axi_sibling_client: Client's siblings sharing the same axi port
 *
 */
struct cam_cpas_client {
	struct cam_cpas_register_params data;
	bool started;
	enum cam_vote_level ahb_level;
	struct cam_axi_vote axi_vote;
	struct cam_cpas_axi_port *axi_port;
	struct list_head axi_sibling_client;
};
/**
 * struct cam_cpas_bus_client : Bus client information
 *
 * @src: Bus master/src id
 * @dst: Bus slave/dst id
 * @pdata: Bus pdata information
 * @client_id: Bus client id
 * @num_usecases: Number of use cases for this client
 * @num_paths: Number of paths for this client
 * @curr_vote_level: current voted index
 * @dyn_vote: Whether dynamic voting enabled
 * @lock: Mutex lock used while voting on this client
 * @valid: Whether bus client is valid
 * @name: Name of the bus client
 *
 */
struct cam_cpas_bus_client {
	int src;
	int dst;
	struct msm_bus_scale_pdata *pdata;
	uint32_t client_id;
	int num_usecases;
	int num_paths;
	unsigned int curr_vote_level;
	bool dyn_vote;
	struct mutex lock;
	bool valid;
	const char *name;
};
/**
 * struct cam_cpas_axi_port : AXI port information
 *
 * @sibling_port: Sibling AXI ports
 * @clients_list_head: List head pointing to list of clients sharing this port
 * @lock: Mutex lock for accessing this port
 * @camnoc_bus: CAMNOC bus client info for this port
 * @mnoc_bus: MNOC bus client info for this port
 * @axi_port_name: Name of this AXI port
 * @axi_port_node: Node representing this AXI Port
 * @axi_port_mnoc_node: Node representing mnoc in this AXI Port
 * @axi_port_camnoc_node: Node representing camnoc in this AXI Port
 * @consolidated_axi_vote: Consolidated axi bw values for this AXI port
 */
struct cam_cpas_axi_port {
	struct list_head sibling_port;
	struct list_head clients_list_head;
	struct mutex lock;
	struct cam_cpas_bus_client camnoc_bus;
	struct cam_cpas_bus_client mnoc_bus;
	const char *axi_port_name;
	struct device_node *axi_port_node;
	struct device_node *axi_port_mnoc_node;
	struct device_node *axi_port_camnoc_node;
	struct cam_axi_vote consolidated_axi_vote;
};
/**
 * struct cam_cpas : CPAS core data structure info
 *
 * @hw_caps: CPAS hw capabilities
 * @cpas_client: Array of pointers to CPAS clients info
 * @client_mutex: Mutex for accessing client info
 * @num_clients: Total number of clients that CPAS supports
 * @registered_clients: Number of Clients registered currently
 * @streamon_clients: Number of Clients that are in start state currently
 * @regbase_index: Register base indices for CPAS register base IDs
 * @ahb_bus_client: AHB Bus client info
 * @axi_ports_list_head: Head pointing to list of AXI ports
 * @internal_ops: CPAS HW internal ops
 * @work_queue: Work queue handle
 * @irq_count: Atomic counter paired with @irq_count_wq
 *             (NOTE(review): semantics live in the HW layer - confirm
 *             in cam_cpas_hw.c / cpas_top)
 * @irq_count_wq: Wait queue associated with @irq_count
 *
 */
struct cam_cpas {
	struct cam_cpas_hw_caps hw_caps;
	struct cam_cpas_client *cpas_client[CAM_CPAS_MAX_CLIENTS];
	struct mutex client_mutex[CAM_CPAS_MAX_CLIENTS];
	uint32_t num_clients;
	uint32_t registered_clients;
	uint32_t streamon_clients;
	int32_t regbase_index[CAM_CPAS_REG_MAX];
	struct cam_cpas_bus_client ahb_bus_client;
	struct list_head axi_ports_list_head;
	struct cam_cpas_internal_ops internal_ops;
	struct workqueue_struct *work_queue;
	atomic_t irq_count;
	wait_queue_head_t irq_count_wq;
};
/* Fill @internal_ops for the camss_top (non-Titan) HW flavor */
int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
/* Fill @internal_ops for the cpas_top (Titan) HW flavor */
int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
/* Apply a cam_cpas_reg descriptor to the given register base */
int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
#endif /* _CAM_CPAS_HW_H_ */

View file

@ -0,0 +1,128 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CPAS_HW_INTF_H_
#define _CAM_CPAS_HW_INTF_H_
#include <linux/platform_device.h>
#include "cam_cpas_api.h"
#include "cam_hw.h"
#include "cam_hw_intf.h"
#include "cam_debug_util.h"
/* Number of times to retry while polling */
#define CAM_CPAS_POLL_RETRY_CNT 5
/* Minimum usecs to sleep while polling */
#define CAM_CPAS_POLL_MIN_USECS 200
/* Maximum usecs to sleep while polling */
#define CAM_CPAS_POLL_MAX_USECS 250
/**
 * enum cam_cpas_hw_type - Enum for CPAS HW type
 */
enum cam_cpas_hw_type {
	CAM_HW_CPASTOP,
	CAM_HW_CAMSSTOP,
};
/**
 * enum cam_cpas_hw_cmd_process - Enum for CPAS HW process command type
 */
enum cam_cpas_hw_cmd_process {
	CAM_CPAS_HW_CMD_REGISTER_CLIENT,
	CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
	CAM_CPAS_HW_CMD_REG_WRITE,
	CAM_CPAS_HW_CMD_REG_READ,
	CAM_CPAS_HW_CMD_AHB_VOTE,
	CAM_CPAS_HW_CMD_AXI_VOTE,
	CAM_CPAS_HW_CMD_INVALID,
};
/**
 * struct cam_cpas_hw_cmd_reg_read_write : CPAS cmd struct for reg read, write
 *
 * @client_handle: Client handle
 * @reg_base: Register base type
 * @offset: Register offset
 * @value: Register value
 * @mb: Whether to do operation with memory barrier
 *
 */
struct cam_cpas_hw_cmd_reg_read_write {
	uint32_t client_handle;
	enum cam_cpas_reg_base reg_base;
	uint32_t offset;
	uint32_t value;
	bool mb;
};
/**
 * struct cam_cpas_hw_cmd_ahb_vote : CPAS cmd struct for AHB vote
 *
 * @client_handle: Client handle
 * @ahb_vote: AHB voting info
 *
 */
struct cam_cpas_hw_cmd_ahb_vote {
	uint32_t client_handle;
	struct cam_ahb_vote *ahb_vote;
};
/**
 * struct cam_cpas_hw_cmd_axi_vote : CPAS cmd struct for AXI vote
 *
 * @client_handle: Client handle
 * @axi_vote: axi bandwidth vote
 *
 */
struct cam_cpas_hw_cmd_axi_vote {
	uint32_t client_handle;
	struct cam_axi_vote *axi_vote;
};
/**
 * struct cam_cpas_hw_cmd_start : CPAS cmd struct for start
 *
 * @client_handle: Client handle
 * @ahb_vote: Initial AHB vote to apply at start
 * @axi_vote: Initial AXI bandwidth vote to apply at start
 *
 */
struct cam_cpas_hw_cmd_start {
	uint32_t client_handle;
	struct cam_ahb_vote *ahb_vote;
	struct cam_axi_vote *axi_vote;
};
/**
 * struct cam_cpas_hw_cmd_stop : CPAS cmd struct for stop
 *
 * @client_handle: Client handle
 *
 */
struct cam_cpas_hw_cmd_stop {
	uint32_t client_handle;
};
/**
 * struct cam_cpas_hw_caps : CPAS HW capabilities
 *
 * @camera_family: Camera family type
 * @camera_version: Camera version
 * @cpas_version: CPAS version
 * @camera_capability: Camera hw capabilities
 *
 */
struct cam_cpas_hw_caps {
	uint32_t camera_family;
	struct cam_hw_version camera_version;
	struct cam_hw_version cpas_version;
	uint32_t camera_capability;
};
/* Probe/remove entry points used by the CPAS interface layer */
int cam_cpas_hw_probe(struct platform_device *pdev,
	struct cam_hw_intf **hw_intf);
int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf);
#endif /* _CAM_CPAS_HW_INTF_H_ */

View file

@ -0,0 +1,655 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/of.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/cam_cpas.h>
#include <media/cam_req_mgr.h>
#include "cam_subdev.h"
#include "cam_cpas_hw_intf.h"
#define CAM_CPAS_DEV_NAME "cam-cpas"
/* True once probe completed; guards every exported entry point below */
#define CAM_CPAS_INTF_INITIALIZED() (g_cpas_intf && g_cpas_intf->probe_done)
/**
 * struct cam_cpas_intf : CPAS interface
 *
 * @pdev: Platform device
 * @subdev: Subdev info
 * @hw_intf: CPAS HW interface
 * @hw_caps: CPAS HW capabilities
 * @intf_lock: CPAS interface mutex
 * @open_cnt: CPAS subdev open count
 * @probe_done: Whether CPAS probe completed
 *
 */
struct cam_cpas_intf {
	struct platform_device *pdev;
	struct cam_subdev subdev;
	struct cam_hw_intf *hw_intf;
	struct cam_cpas_hw_caps hw_caps;
	struct mutex intf_lock;
	uint32_t open_cnt;
	bool probe_done;
};
/* Singleton: there is exactly one CPAS instance per SoC */
static struct cam_cpas_intf *g_cpas_intf;
/*
 * cam_cpas_get_cpas_hw_version() - report the SoC hw_version discovered
 * at probe time. Returns 0 even when no Titan version matched (the caller
 * sees CAM_CPAS_TITAN_NONE in that case).
 */
int cam_cpas_get_cpas_hw_version(uint32_t *hw_version)
{
	struct cam_hw_info *cpas_hw;

	if (!CAM_CPAS_INTF_INITIALIZED()) {
		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
		return -ENODEV;
	}
	if (!hw_version) {
		CAM_ERR(CAM_CPAS, "invalid input %pK", hw_version);
		return -EINVAL;
	}

	cpas_hw = (struct cam_hw_info *)g_cpas_intf->hw_intf->hw_priv;
	*hw_version = cpas_hw->soc_info.hw_version;

	if (*hw_version == CAM_CPAS_TITAN_NONE)
		CAM_DBG(CAM_CPAS, "Didn't find a valid HW Version %d",
			*hw_version);

	return 0;
}
/*
 * cam_cpas_get_hw_info() - copy the cached probe-time HW capabilities
 * (family, versions, capability mask) into the caller's buffers.
 */
int cam_cpas_get_hw_info(uint32_t *camera_family,
	struct cam_hw_version *camera_version,
	struct cam_hw_version *cpas_version,
	uint32_t *cam_caps)
{
	struct cam_cpas_hw_caps *caps;

	if (!CAM_CPAS_INTF_INITIALIZED()) {
		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
		return -ENODEV;
	}
	if (!camera_family || !camera_version || !cpas_version || !cam_caps) {
		CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK %pK",
			camera_family, camera_version, cpas_version, cam_caps);
		return -EINVAL;
	}

	caps = &g_cpas_intf->hw_caps;
	*camera_family = caps->camera_family;
	*camera_version = caps->camera_version;
	*cpas_version = caps->cpas_version;
	*cam_caps = caps->camera_capability;

	return 0;
}
EXPORT_SYMBOL(cam_cpas_get_hw_info);
/*
 * cam_cpas_reg_write() - write @value at @offset within @reg_base on
 * behalf of @client_handle, routed through the HW layer's process_cmd.
 */
int cam_cpas_reg_write(uint32_t client_handle,
	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
	uint32_t value)
{
	struct cam_cpas_hw_cmd_reg_read_write cmd_reg_write;
	int rc;

	if (!CAM_CPAS_INTF_INITIALIZED()) {
		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
		return -ENODEV;
	}
	if (!g_cpas_intf->hw_intf->hw_ops.process_cmd) {
		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
		return -EINVAL;
	}

	cmd_reg_write.client_handle = client_handle;
	cmd_reg_write.reg_base = reg_base;
	cmd_reg_write.offset = offset;
	cmd_reg_write.value = value;
	cmd_reg_write.mb = mb;

	rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
		g_cpas_intf->hw_intf->hw_priv,
		CAM_CPAS_HW_CMD_REG_WRITE, &cmd_reg_write,
		sizeof(struct cam_cpas_hw_cmd_reg_read_write));
	if (rc)
		CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);

	return rc;
}
EXPORT_SYMBOL(cam_cpas_reg_write);
/*
 * cam_cpas_reg_read() - read the register at @offset within @reg_base on
 * behalf of @client_handle; the result is stored in *@value on success.
 */
int cam_cpas_reg_read(uint32_t client_handle,
	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
	uint32_t *value)
{
	struct cam_cpas_hw_cmd_reg_read_write cmd_reg_read;
	int rc;

	if (!CAM_CPAS_INTF_INITIALIZED()) {
		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
		return -ENODEV;
	}
	if (!value) {
		CAM_ERR(CAM_CPAS, "Invalid arg value");
		return -EINVAL;
	}
	if (!g_cpas_intf->hw_intf->hw_ops.process_cmd) {
		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
		return -EINVAL;
	}

	cmd_reg_read.client_handle = client_handle;
	cmd_reg_read.reg_base = reg_base;
	cmd_reg_read.offset = offset;
	cmd_reg_read.mb = mb;
	cmd_reg_read.value = 0;

	rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
		g_cpas_intf->hw_intf->hw_priv,
		CAM_CPAS_HW_CMD_REG_READ, &cmd_reg_read,
		sizeof(struct cam_cpas_hw_cmd_reg_read_write));
	if (rc) {
		CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
		return rc;
	}

	*value = cmd_reg_read.value;
	return 0;
}
EXPORT_SYMBOL(cam_cpas_reg_read);
/*
 * cam_cpas_update_axi_vote() - forward a new AXI bandwidth vote for
 * @client_handle to the HW layer.
 */
int cam_cpas_update_axi_vote(uint32_t client_handle,
	struct cam_axi_vote *axi_vote)
{
	struct cam_cpas_hw_cmd_axi_vote cmd_axi_vote;
	int rc;

	if (!CAM_CPAS_INTF_INITIALIZED()) {
		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
		return -ENODEV;
	}
	if (!g_cpas_intf->hw_intf->hw_ops.process_cmd) {
		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
		return -EINVAL;
	}

	cmd_axi_vote.client_handle = client_handle;
	cmd_axi_vote.axi_vote = axi_vote;

	rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
		g_cpas_intf->hw_intf->hw_priv,
		CAM_CPAS_HW_CMD_AXI_VOTE, &cmd_axi_vote,
		sizeof(struct cam_cpas_hw_cmd_axi_vote));
	if (rc)
		CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);

	return rc;
}
EXPORT_SYMBOL(cam_cpas_update_axi_vote);
/*
 * cam_cpas_update_ahb_vote() - forward a new AHB clock-level vote for
 * @client_handle to the HW layer.
 */
int cam_cpas_update_ahb_vote(uint32_t client_handle,
	struct cam_ahb_vote *ahb_vote)
{
	struct cam_cpas_hw_cmd_ahb_vote cmd_ahb_vote;
	int rc;

	if (!CAM_CPAS_INTF_INITIALIZED()) {
		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
		return -ENODEV;
	}
	if (!g_cpas_intf->hw_intf->hw_ops.process_cmd) {
		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
		return -EINVAL;
	}

	cmd_ahb_vote.client_handle = client_handle;
	cmd_ahb_vote.ahb_vote = ahb_vote;

	rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
		g_cpas_intf->hw_intf->hw_priv,
		CAM_CPAS_HW_CMD_AHB_VOTE, &cmd_ahb_vote,
		sizeof(struct cam_cpas_hw_cmd_ahb_vote));
	if (rc)
		CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);

	return rc;
}
EXPORT_SYMBOL(cam_cpas_update_ahb_vote);
/*
 * cam_cpas_stop() - take @client_handle out of the started state via the
 * HW layer's stop op.
 */
int cam_cpas_stop(uint32_t client_handle)
{
	struct cam_cpas_hw_cmd_stop cmd_hw_stop;
	int rc;

	if (!CAM_CPAS_INTF_INITIALIZED()) {
		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
		return -ENODEV;
	}
	if (!g_cpas_intf->hw_intf->hw_ops.stop) {
		CAM_ERR(CAM_CPAS, "Invalid stop ops");
		return -EINVAL;
	}

	cmd_hw_stop.client_handle = client_handle;

	rc = g_cpas_intf->hw_intf->hw_ops.stop(
		g_cpas_intf->hw_intf->hw_priv, &cmd_hw_stop,
		sizeof(struct cam_cpas_hw_cmd_stop));
	if (rc)
		CAM_ERR(CAM_CPAS, "Failed in stop, rc=%d", rc);

	return rc;
}
EXPORT_SYMBOL(cam_cpas_stop);
/*
 * cam_cpas_start() - move @client_handle to the started state, applying
 * the caller's initial AHB and AXI votes via the HW layer's start op.
 */
int cam_cpas_start(uint32_t client_handle,
	struct cam_ahb_vote *ahb_vote, struct cam_axi_vote *axi_vote)
{
	struct cam_cpas_hw_cmd_start cmd_hw_start;
	int rc;

	if (!CAM_CPAS_INTF_INITIALIZED()) {
		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
		return -ENODEV;
	}
	if (!g_cpas_intf->hw_intf->hw_ops.start) {
		CAM_ERR(CAM_CPAS, "Invalid start ops");
		return -EINVAL;
	}

	cmd_hw_start.client_handle = client_handle;
	cmd_hw_start.ahb_vote = ahb_vote;
	cmd_hw_start.axi_vote = axi_vote;

	rc = g_cpas_intf->hw_intf->hw_ops.start(
		g_cpas_intf->hw_intf->hw_priv, &cmd_hw_start,
		sizeof(struct cam_cpas_hw_cmd_start));
	if (rc)
		CAM_ERR(CAM_CPAS, "Failed in start, rc=%d", rc);

	return rc;
}
EXPORT_SYMBOL(cam_cpas_start);
/*
 * cam_cpas_unregister_client() - release the CPAS registration held by
 * @client_handle.
 */
int cam_cpas_unregister_client(uint32_t client_handle)
{
	int rc;

	if (!CAM_CPAS_INTF_INITIALIZED()) {
		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
		return -ENODEV;
	}
	if (!g_cpas_intf->hw_intf->hw_ops.process_cmd) {
		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
		return -EINVAL;
	}

	rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
		g_cpas_intf->hw_intf->hw_priv,
		CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
		&client_handle, sizeof(uint32_t));
	if (rc)
		CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);

	return rc;
}
EXPORT_SYMBOL(cam_cpas_unregister_client);
/*
 * cam_cpas_register_client() - register a new CPAS client; the assigned
 * handle is returned inside @register_params by the HW layer.
 */
int cam_cpas_register_client(
	struct cam_cpas_register_params *register_params)
{
	int rc;

	if (!CAM_CPAS_INTF_INITIALIZED()) {
		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
		return -ENODEV;
	}
	if (!g_cpas_intf->hw_intf->hw_ops.process_cmd) {
		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
		return -EINVAL;
	}

	rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
		g_cpas_intf->hw_intf->hw_priv,
		CAM_CPAS_HW_CMD_REGISTER_CLIENT, register_params,
		sizeof(struct cam_cpas_register_params));
	if (rc)
		CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);

	return rc;
}
EXPORT_SYMBOL(cam_cpas_register_client);
int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
struct cam_control *cmd)
{
int rc = 0;
if (!cmd) {
CAM_ERR(CAM_CPAS, "Invalid input cmd");
return -EINVAL;
}
switch (cmd->op_code) {
case CAM_QUERY_CAP: {
struct cam_cpas_query_cap query;
rc = copy_from_user(&query, u64_to_user_ptr(cmd->handle),
sizeof(query));
if (rc) {
CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d",
rc);
break;
}
rc = cam_cpas_get_hw_info(&query.camera_family,
&query.camera_version, &query.cpas_version,
&query.reserved);
if (rc)
break;
rc = copy_to_user(u64_to_user_ptr(cmd->handle), &query,
sizeof(query));
if (rc)
CAM_ERR(CAM_CPAS, "Failed in copy to user, rc=%d", rc);
break;
}
case CAM_SD_SHUTDOWN:
break;
default:
CAM_ERR(CAM_CPAS, "Unknown op code %d for CPAS", cmd->op_code);
rc = -EINVAL;
break;
}
return rc;
}
/*
 * cam_cpas_subdev_open() - bump the subdev open count under intf_lock.
 */
static int cam_cpas_subdev_open(struct v4l2_subdev *sd,
	struct v4l2_subdev_fh *fh)
{
	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);

	if (!cpas_intf || !cpas_intf->probe_done) {
		CAM_ERR(CAM_CPAS, "CPAS not initialized");
		return -ENODEV;
	}

	mutex_lock(&cpas_intf->intf_lock);
	cpas_intf->open_cnt++;
	CAM_DBG(CAM_CPAS, "CPAS Subdev open count %d", cpas_intf->open_cnt);
	mutex_unlock(&cpas_intf->intf_lock);

	return 0;
}
/*
 * cam_cpas_subdev_close() - drop the subdev open count under intf_lock.
 */
static int cam_cpas_subdev_close(struct v4l2_subdev *sd,
	struct v4l2_subdev_fh *fh)
{
	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);

	if (!cpas_intf || !cpas_intf->probe_done) {
		CAM_ERR(CAM_CPAS, "CPAS not initialized");
		return -ENODEV;
	}

	mutex_lock(&cpas_intf->intf_lock);
	cpas_intf->open_cnt--;
	CAM_DBG(CAM_CPAS, "CPAS Subdev close count %d", cpas_intf->open_cnt);
	mutex_unlock(&cpas_intf->intf_lock);

	return 0;
}
/*
 * cam_cpas_subdev_ioctl() - native ioctl entry; only VIDIOC_CAM_CONTROL
 * is routed through to cam_cpas_subdev_cmd().
 */
static long cam_cpas_subdev_ioctl(struct v4l2_subdev *sd,
	unsigned int cmd, void *arg)
{
	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);

	if (!cpas_intf || !cpas_intf->probe_done) {
		CAM_ERR(CAM_CPAS, "CPAS not initialized");
		return -ENODEV;
	}

	if (cmd == VIDIOC_CAM_CONTROL)
		return cam_cpas_subdev_cmd(cpas_intf,
			(struct cam_control *)arg);

	CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
	return -EINVAL;
}
#ifdef CONFIG_COMPAT
/*
 * cam_cpas_subdev_compat_ioctl() - 32-bit compat shim: copy the
 * cam_control block in, dispatch it, and copy it back on success.
 */
static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
	unsigned int cmd, unsigned long arg)
{
	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
	struct cam_control cmd_data;
	int32_t rc;

	if (!cpas_intf || !cpas_intf->probe_done) {
		CAM_ERR(CAM_CPAS, "CPAS not initialized");
		return -ENODEV;
	}
	if (copy_from_user(&cmd_data, (void __user *)arg,
		sizeof(cmd_data))) {
		CAM_ERR(CAM_CPAS, "Failed to copy from user_ptr=%pK size=%zu",
			(void __user *)arg, sizeof(cmd_data));
		return -EFAULT;
	}

	if (cmd == VIDIOC_CAM_CONTROL) {
		rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data);
	} else {
		CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
		rc = -EINVAL;
	}

	if (!rc && copy_to_user((void __user *)arg, &cmd_data,
		sizeof(cmd_data))) {
		CAM_ERR(CAM_CPAS,
			"Failed to copy to user_ptr=%pK size=%zu",
			(void __user *)arg, sizeof(cmd_data));
		rc = -EFAULT;
	}

	return rc;
}
#endif
static struct v4l2_subdev_core_ops cpas_subdev_core_ops = {
.ioctl = cam_cpas_subdev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl32 = cam_cpas_subdev_compat_ioctl,
#endif
};
/* Top-level ops table registered with the V4L2 framework. */
static const struct v4l2_subdev_ops cpas_subdev_ops = {
	.core = &cpas_subdev_core_ops,
};
/* Open/close hooks maintaining the subdev open count. */
static const struct v4l2_subdev_internal_ops cpas_subdev_intern_ops = {
	.open = cam_cpas_subdev_open,
	.close = cam_cpas_subdev_close,
};
/*
 * cam_cpas_subdev_register() - describe and register the CPAS subdev
 * with the camera framework, then attach g_cpas_intf as drvdata.
 */
static int cam_cpas_subdev_register(struct platform_device *pdev)
{
	struct cam_subdev *subdev;
	int rc;

	if (!g_cpas_intf)
		return -EINVAL;

	subdev = &g_cpas_intf->subdev;

	/* describe the subdev before handing it to the framework */
	subdev->name = CAM_CPAS_DEV_NAME;
	subdev->pdev = pdev;
	subdev->ops = &cpas_subdev_ops;
	subdev->internal_ops = &cpas_subdev_intern_ops;
	subdev->token = g_cpas_intf;
	subdev->sd_flags =
		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	subdev->ent_function = CAM_CPAS_DEVICE_TYPE;

	rc = cam_register_subdev(subdev);
	if (rc) {
		CAM_ERR(CAM_CPAS, "failed register subdev: %s!",
			CAM_CPAS_DEV_NAME);
		return rc;
	}

	platform_set_drvdata(g_cpas_intf->pdev, g_cpas_intf);
	return 0;
}
static int cam_cpas_dev_probe(struct platform_device *pdev)
{
struct cam_cpas_hw_caps *hw_caps;
struct cam_hw_intf *hw_intf;
int rc;
if (g_cpas_intf) {
CAM_ERR(CAM_CPAS, "cpas dev proble already done");
return -EALREADY;
}
g_cpas_intf = kzalloc(sizeof(*g_cpas_intf), GFP_KERNEL);
if (!g_cpas_intf)
return -ENOMEM;
mutex_init(&g_cpas_intf->intf_lock);
g_cpas_intf->pdev = pdev;
rc = cam_cpas_hw_probe(pdev, &g_cpas_intf->hw_intf);
if (rc || (g_cpas_intf->hw_intf == NULL)) {
CAM_ERR(CAM_CPAS, "Failed in hw probe, rc=%d", rc);
goto error_destroy_mem;
}
hw_intf = g_cpas_intf->hw_intf;
hw_caps = &g_cpas_intf->hw_caps;
if (hw_intf->hw_ops.get_hw_caps) {
rc = hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
hw_caps, sizeof(struct cam_cpas_hw_caps));
if (rc) {
CAM_ERR(CAM_CPAS, "Failed in get_hw_caps, rc=%d", rc);
goto error_hw_remove;
}
} else {
CAM_ERR(CAM_CPAS, "Invalid get_hw_caps ops");
goto error_hw_remove;
}
rc = cam_cpas_subdev_register(pdev);
if (rc)
goto error_hw_remove;
g_cpas_intf->probe_done = true;
CAM_DBG(CAM_CPAS,
"CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x",
hw_caps->camera_family, hw_caps->camera_version.major,
hw_caps->camera_version.minor, hw_caps->camera_version.incr,
hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
hw_caps->cpas_version.incr, hw_caps->camera_capability);
return rc;
error_hw_remove:
cam_cpas_hw_remove(g_cpas_intf->hw_intf);
error_destroy_mem:
mutex_destroy(&g_cpas_intf->intf_lock);
kfree(g_cpas_intf);
g_cpas_intf = NULL;
CAM_ERR(CAM_CPAS, "CPAS probe failed");
return rc;
}
/*
 * cam_cpas_dev_remove() - platform remove; undo everything probe set up.
 *
 * Unregisters the subdev, removes the CPAS HW layer and frees the
 * global interface object. probe_done is cleared under intf_lock first
 * so concurrent API callers observe the interface as uninitialized
 * before teardown proceeds.
 */
static int cam_cpas_dev_remove(struct platform_device *dev)
{
	if (!CAM_CPAS_INTF_INITIALIZED()) {
		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
		return -ENODEV;
	}

	mutex_lock(&g_cpas_intf->intf_lock);
	g_cpas_intf->probe_done = false;
	cam_unregister_subdev(&g_cpas_intf->subdev);
	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
	mutex_unlock(&g_cpas_intf->intf_lock);
	mutex_destroy(&g_cpas_intf->intf_lock);
	kfree(g_cpas_intf);
	g_cpas_intf = NULL;
	return 0;
}
/* Device tree match table: binds this driver to the qcom,cam-cpas node */
static const struct of_device_id cam_cpas_dt_match[] = {
	{.compatible = "qcom,cam-cpas"},
	{}
};
/*
 * Platform driver definition. suppress_bind_attrs disables manual
 * unbind/bind via sysfs, since other camera drivers depend on CPAS
 * staying bound for the lifetime of the system.
 */
static struct platform_driver cam_cpas_driver = {
	.probe = cam_cpas_dev_probe,
	.remove = cam_cpas_dev_remove,
	.driver = {
		.name = CAM_CPAS_DEV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = cam_cpas_dt_match,
		.suppress_bind_attrs = true,
	},
};
/* Module entry: register the CPAS platform driver with the driver core */
static int __init cam_cpas_dev_init_module(void)
{
	return platform_driver_register(&cam_cpas_driver);
}

/* Module exit: unregister the driver; remove() runs for any bound device */
static void __exit cam_cpas_dev_exit_module(void)
{
	platform_driver_unregister(&cam_cpas_driver);
}

module_init(cam_cpas_dev_init_module);
module_exit(cam_cpas_dev_exit_module);
MODULE_DESCRIPTION("MSM CPAS driver");
MODULE_LICENSE("GPL v2");

View file

@ -0,0 +1,277 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "cam_cpas_api.h"
#include "cam_cpas_hw_intf.h"
#include "cam_cpas_hw.h"
#include "cam_cpas_soc.h"
/*
 * cam_cpas_get_custom_dt_info() - parse CPAS-specific DT properties.
 * @pdev: CPAS platform device
 * @soc_private: CPAS private SOC data to fill in
 *
 * Reads arch compatibility, camnoc bandwidth/clock settings, client
 * names, client AXI port name mappings and the optional vdd-corner to
 * AHB level mapping from the device's DT node.
 *
 * Return: 0 on success, negative errno on missing/invalid properties.
 */
int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
	struct cam_cpas_private_soc *soc_private)
{
	struct device_node *of_node;
	int count = 0, i = 0, rc = 0;

	if (!soc_private || !pdev) {
		CAM_ERR(CAM_CPAS, "invalid input arg %pK %pK",
			soc_private, pdev);
		return -EINVAL;
	}

	of_node = pdev->dev.of_node;

	rc = of_property_read_string_index(of_node, "arch-compat", 0,
		(const char **)&soc_private->arch_compat);
	if (rc) {
		CAM_ERR(CAM_CPAS, "device %s failed to read arch-compat",
			pdev->name);
		return rc;
	}

	/*
	 * camnoc-axi-min-ib-bw may be a 64-bit value, or a 32-bit cell on
	 * older DTs; -EOVERFLOW from the u64 read indicates the latter, so
	 * retry as u32. A missing property falls back to the default.
	 */
	soc_private->camnoc_axi_min_ib_bw = 0;
	rc = of_property_read_u64(of_node,
		"camnoc-axi-min-ib-bw",
		&soc_private->camnoc_axi_min_ib_bw);
	if (rc == -EOVERFLOW) {
		soc_private->camnoc_axi_min_ib_bw = 0;
		rc = of_property_read_u32(of_node,
			"camnoc-axi-min-ib-bw",
			(u32 *)&soc_private->camnoc_axi_min_ib_bw);
	}
	if (rc) {
		CAM_DBG(CAM_CPAS,
			"failed to read camnoc-axi-min-ib-bw rc:%d", rc);
		soc_private->camnoc_axi_min_ib_bw =
			CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
	}
	CAM_DBG(CAM_CPAS, "camnoc-axi-min-ib-bw = %llu",
		soc_private->camnoc_axi_min_ib_bw);

	soc_private->client_id_based = of_property_read_bool(of_node,
		"client-id-based");

	count = of_property_count_strings(of_node, "client-names");
	if (count <= 0) {
		CAM_ERR(CAM_CPAS, "no client-names found");
		return -EINVAL;
	}
	soc_private->num_clients = count;
	CAM_DBG(CAM_CPAS,
		"arch-compat=%s, client_id_based = %d, num_clients=%d",
		soc_private->arch_compat, soc_private->client_id_based,
		soc_private->num_clients);

	for (i = 0; i < soc_private->num_clients; i++) {
		rc = of_property_read_string_index(of_node,
			"client-names", i, &soc_private->client_name[i]);
		if (rc) {
			CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i);
			return -ENODEV;
		}
		CAM_DBG(CAM_CPAS, "Client[%d] : %s", i,
			soc_private->client_name[i]);
	}

	/* Each client must have a matching AXI port name entry */
	count = of_property_count_strings(of_node, "client-axi-port-names");
	if ((count <= 0) || (count != soc_private->num_clients)) {
		CAM_ERR(CAM_CPAS, "incorrect client-axi-port-names info %d %d",
			count, soc_private->num_clients);
		return -EINVAL;
	}

	for (i = 0; i < soc_private->num_clients; i++) {
		rc = of_property_read_string_index(of_node,
			"client-axi-port-names", i,
			&soc_private->client_axi_port_name[i]);
		if (rc) {
			CAM_ERR(CAM_CPAS,
				"no client-axi-port-name at cnt=%d", i);
			return -ENODEV;
		}
		CAM_DBG(CAM_CPAS, "Client AXI Port[%d] : %s", i,
			soc_private->client_axi_port_name[i]);
	}

	soc_private->axi_camnoc_based = of_property_read_bool(of_node,
		"client-bus-camnoc-based");

	soc_private->control_camnoc_axi_clk = of_property_read_bool(of_node,
		"control-camnoc-axi-clk");
	if (soc_private->control_camnoc_axi_clk == true) {
		/* Bus width is mandatory when CPAS drives the camnoc clock */
		rc = of_property_read_u32(of_node, "camnoc-bus-width",
			&soc_private->camnoc_bus_width);
		if (rc || (soc_private->camnoc_bus_width == 0)) {
			CAM_ERR(CAM_CPAS, "Bus width not found rc=%d, %d",
				rc, soc_private->camnoc_bus_width);
			return rc;
		}
		rc = of_property_read_u32(of_node,
			"camnoc-axi-clk-bw-margin-perc",
			&soc_private->camnoc_axi_clk_bw_margin);
		if (rc) {
			/* this is not fatal, overwrite rc */
			rc = 0;
			soc_private->camnoc_axi_clk_bw_margin = 0;
		}
	}
	CAM_DBG(CAM_CPAS,
		"control_camnoc_axi_clk=%d, width=%d, margin=%d",
		soc_private->control_camnoc_axi_clk,
		soc_private->camnoc_bus_width,
		soc_private->camnoc_axi_clk_bw_margin);

	/*
	 * Optional vdd-corner to AHB level mapping: both arrays must be
	 * present with the same number of entries to be honored.
	 */
	count = of_property_count_u32_elems(of_node, "vdd-corners");
	if ((count > 0) && (count <= CAM_REGULATOR_LEVEL_MAX) &&
		(of_property_count_strings(of_node, "vdd-corner-ahb-mapping") ==
		count)) {
		const char *ahb_string;

		for (i = 0; i < count; i++) {
			rc = of_property_read_u32_index(of_node, "vdd-corners",
				i, &soc_private->vdd_ahb[i].vdd_corner);
			if (rc) {
				CAM_ERR(CAM_CPAS,
					"vdd-corners failed at index=%d", i);
				return -ENODEV;
			}

			rc = of_property_read_string_index(of_node,
				"vdd-corner-ahb-mapping", i, &ahb_string);
			if (rc) {
				CAM_ERR(CAM_CPAS,
					"no ahb-mapping at index=%d", i);
				return -ENODEV;
			}

			rc = cam_soc_util_get_level_from_string(ahb_string,
				&soc_private->vdd_ahb[i].ahb_level);
			if (rc) {
				CAM_ERR(CAM_CPAS,
					"invalid ahb-string at index=%d", i);
				return -EINVAL;
			}

			CAM_DBG(CAM_CPAS,
				"Vdd-AHB mapping [%d] : [%d] [%s] [%d]", i,
				soc_private->vdd_ahb[i].vdd_corner,
				ahb_string, soc_private->vdd_ahb[i].ahb_level);
		}
		soc_private->num_vdd_ahb_mapping = count;
	}

	return 0;
}
/*
 * cam_cpas_soc_init_resources() - acquire DT info and platform resources.
 * @soc_info: SOC info to populate
 * @irq_handler: handler for the CPAS IRQ line (required if one exists)
 * @irq_data: private cookie passed to @irq_handler
 *
 * On failure, any resources already requested are released again.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t irq_handler, void *irq_data)
{
	struct cam_cpas_private_soc *soc_private;
	int rc;

	rc = cam_soc_util_get_dt_properties(soc_info);
	if (rc) {
		CAM_ERR(CAM_CPAS, "failed in get_dt_properties, rc=%d", rc);
		return rc;
	}

	/* An IRQ line described in DT requires a handler to service it */
	if (soc_info->irq_line && !irq_handler) {
		CAM_ERR(CAM_CPAS, "Invalid IRQ handler");
		return -EINVAL;
	}

	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
		irq_data);
	if (rc) {
		CAM_ERR(CAM_CPAS, "failed in request_platform_resource, rc=%d",
			rc);
		return rc;
	}

	soc_private = kzalloc(sizeof(*soc_private), GFP_KERNEL);
	if (!soc_private) {
		rc = -ENOMEM;
		goto release_res;
	}
	soc_info->soc_private = soc_private;

	rc = cam_cpas_get_custom_dt_info(soc_info->pdev, soc_private);
	if (rc) {
		CAM_ERR(CAM_CPAS, "failed in get_custom_info, rc=%d", rc);
		goto free_soc_private;
	}

	return 0;

free_soc_private:
	kfree(soc_info->soc_private);
release_res:
	cam_soc_util_release_platform_resource(soc_info);
	return rc;
}
/*
 * cam_cpas_soc_deinit_resources() - release platform resources and free
 * the private DT info allocated by cam_cpas_soc_init_resources().
 *
 * Return: result of the platform resource release (failure is logged).
 */
int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
{
	int rc = cam_soc_util_release_platform_resource(soc_info);

	if (rc)
		CAM_ERR(CAM_CPAS, "release platform failed, rc=%d", rc);

	kfree(soc_info->soc_private);
	soc_info->soc_private = NULL;

	return rc;
}
/*
 * cam_cpas_soc_enable_resources() - power up CPAS platform resources
 * (regulators, clocks at @default_level, IRQ).
 *
 * Return: 0 on success, negative errno otherwise.
 */
int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
	enum cam_vote_level default_level)
{
	int rc = cam_soc_util_enable_platform_resource(soc_info, true,
		default_level, true);

	if (rc)
		CAM_ERR(CAM_CPAS, "enable platform resource failed, rc=%d", rc);

	return rc;
}
/*
 * cam_cpas_soc_disable_resources() - power down CPAS platform resources.
 * @disable_clocks: also gate the clocks
 * @disable_irq: also disable the IRQ line
 *
 * Return: 0 on success, negative errno otherwise.
 */
int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
	bool disable_clocks, bool disable_irq)
{
	int rc = cam_soc_util_disable_platform_resource(soc_info,
		disable_clocks, disable_irq);

	if (rc)
		CAM_ERR(CAM_CPAS, "disable platform failed, rc=%d", rc);

	return rc;
}
/*
 * cam_cpas_soc_disable_irq() - disable only the CPAS IRQ line, leaving
 * the other platform resources untouched.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info)
{
	int rc = cam_soc_util_irq_disable(soc_info);

	if (rc)
		CAM_ERR(CAM_CPAS, "disable irq failed, rc=%d", rc);

	return rc;
}

View file

@ -0,0 +1,69 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CPAS_SOC_H_
#define _CAM_CPAS_SOC_H_
#include "cam_soc_util.h"
#include "cam_cpas_hw.h"
#define CAM_REGULATOR_LEVEL_MAX 16
/**
* struct cam_cpas_vdd_ahb_mapping : Voltage to ahb level mapping
*
* @vdd_corner : Voltage corner value
* @ahb_level : AHB vote level corresponds to this vdd_corner
*
*/
struct cam_cpas_vdd_ahb_mapping {
	unsigned int vdd_corner;	/* regulator corner value from DT */
	enum cam_vote_level ahb_level;	/* AHB vote used at this corner */
};
/**
* struct cam_cpas_private_soc : CPAS private DT info
*
* @arch_compat: ARCH compatible string
* @client_id_based: Whether clients are id based
* @num_clients: Number of clients supported
* @client_name: Client names
* @axi_camnoc_based: Whether AXi access is camnoc based
* @client_axi_port_name: AXI Port name for each client
* @axi_port_list_node : Node representing AXI Ports list
* @num_vdd_ahb_mapping : Number of vdd to ahb level mapping supported
* @vdd_ahb : AHB level mapping info for the supported vdd levels
* @control_camnoc_axi_clk : Whether CPAS driver need to set camnoc axi clk freq
* @camnoc_bus_width : CAMNOC Bus width
* @camnoc_axi_clk_bw_margin : BW Margin in percentage to add while calculating
* camnoc axi clock
* @camnoc_axi_min_ib_bw: Min camnoc BW which varies based on target
*
*/
struct cam_cpas_private_soc {
	const char *arch_compat;
	bool client_id_based;
	uint32_t num_clients;
	const char *client_name[CAM_CPAS_MAX_CLIENTS];
	bool axi_camnoc_based;
	/* one AXI port name per client, parallel to client_name[] */
	const char *client_axi_port_name[CAM_CPAS_MAX_CLIENTS];
	struct device_node *axi_port_list_node;
	/* number of valid entries in vdd_ahb[] */
	uint32_t num_vdd_ahb_mapping;
	struct cam_cpas_vdd_ahb_mapping vdd_ahb[CAM_REGULATOR_LEVEL_MAX];
	bool control_camnoc_axi_clk;
	uint32_t camnoc_bus_width;
	/* margin is a percentage applied when deriving camnoc axi clk */
	uint32_t camnoc_axi_clk_bw_margin;
	uint64_t camnoc_axi_min_ib_bw;
};
/* Implemented in cam_cpas_soc.c; parameter name matches the definition
 * (the previous "vfe_irq_handler" name was a copy-paste leftover).
 */
int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t irq_handler, void *irq_data);
int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
	enum cam_vote_level default_level);
int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
	bool disable_clocks, bool disable_irq);
int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info);
#endif /* _CAM_CPAS_SOC_H_ */

View file

@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Build rules for the CAMSS-top CPAS hardware variant.
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas
obj-$(CONFIG_SPECTRA_CAMERA) += cam_camsstop_hw.o

View file

@ -0,0 +1,82 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include "cam_cpas_hw_intf.h"
#include "cam_cpas_hw.h"
#include "cam_cpas_soc.h"
int cam_camsstop_get_hw_info(struct cam_hw_info *cpas_hw,
struct cam_cpas_hw_caps *hw_caps)
{
struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CAMSS];
uint32_t reg_value;
if (reg_indx == -1)
return -EINVAL;
hw_caps->camera_family = CAM_FAMILY_CAMERA_SS;
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
hw_caps->camera_version.major =
CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
hw_caps->camera_version.minor =
CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
hw_caps->camera_version.incr =
CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
CAM_DBG(CAM_FD, "Family %d, version %d.%d.%d",
hw_caps->camera_family, hw_caps->camera_version.major,
hw_caps->camera_version.minor, hw_caps->camera_version.incr);
return 0;
}
/*
 * cam_camsstop_setup_regbase_indices() - locate the "cam_camss" memory
 * block among the mapped regions and record its index.
 *
 * Return: 0 on success, -EINVAL on bad arguments or missing regbase.
 */
int cam_camsstop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
	int32_t regbase_index[], int32_t num_reg_map)
{
	uint32_t index;
	int rc;

	if (num_reg_map > CAM_CPAS_REG_MAX) {
		CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
		return -EINVAL;
	}

	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
		CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
			soc_info->num_mem_block);
		return -EINVAL;
	}

	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
		soc_info->num_mem_block, "cam_camss", &index);
	if (rc || (index >= num_reg_map)) {
		CAM_ERR(CAM_CPAS, "regbase not found for CAM_CPAS_REG_CAMSS");
		return -EINVAL;
	}
	regbase_index[CAM_CPAS_REG_CAMSS] = index;

	return 0;
}
/*
 * cam_camsstop_get_internal_ops() - populate the internal ops vtable for
 * the CAMSS-top variant. Ops not applicable to this variant (IRQ
 * handling, hw version init, power on/off hooks) are explicitly NULL.
 *
 * Return: 0 on success, -EINVAL if @internal_ops is NULL.
 */
int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
{
	if (!internal_ops) {
		CAM_ERR(CAM_CPAS, "invalid NULL param");
		return -EINVAL;
	}

	internal_ops->get_hw_info = cam_camsstop_get_hw_info;
	internal_ops->init_hw_version = NULL;
	internal_ops->handle_irq = NULL;
	internal_ops->setup_regbase = cam_camsstop_setup_regbase_indices;
	internal_ops->power_on = NULL;
	internal_ops->power_off = NULL;
	return 0;
}

View file

@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Build rules for the CPAS-top (Titan) hardware variant.
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas
obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpastop_hw.o

View file

@ -0,0 +1,613 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include "cam_cpas_hw_intf.h"
#include "cam_cpas_hw.h"
#include "cam_cpastop_hw.h"
#include "cam_io_util.h"
#include "cam_cpas_soc.h"
#include "cpastop100.h"
#include "cpastop_v150_100.h"
#include "cpastop_v170_110.h"
#include "cpastop_v175_100.h"
#include "cpastop_v175_101.h"
/* HW-version specific CAMNOC data, selected in cam_cpastop_init_hw_version() */
struct cam_camnoc_info *camnoc_info;

#define CAMNOC_SLAVE_MAX_ERR_CODE 7
/*
 * Human-readable strings for CAMNOC slave errlog err_code values 0..6;
 * index CAMNOC_SLAVE_MAX_ERR_CODE is the catch-all for unknown codes.
 * NOTE(review): "salve" appears to be a typo for "slave"; renaming would
 * touch other call sites, so it is only flagged here.
 */
static const char * const camnoc_salve_err_code[] = {
	"Target Error",              /* err code 0 */
	"Address decode error",      /* err code 1 */
	"Unsupported request",       /* err code 2 */
	"Disconnected target",       /* err code 3 */
	"Security violation",        /* err code 4 */
	"Hidden security violation", /* err code 5 */
	"Timeout Error",             /* err code 6 */
	"Unknown Error",             /* unknown err code */
};
static int cam_cpastop_get_hw_info(struct cam_hw_info *cpas_hw,
struct cam_cpas_hw_caps *hw_caps)
{
struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CPASTOP];
uint32_t reg_value;
if (reg_indx == -1)
return -EINVAL;
hw_caps->camera_family = CAM_FAMILY_CPAS_SS;
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
hw_caps->camera_version.major =
CAM_BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
hw_caps->camera_version.minor =
CAM_BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
hw_caps->camera_version.incr =
CAM_BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x4);
hw_caps->cpas_version.major =
CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
hw_caps->cpas_version.minor =
CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
hw_caps->cpas_version.incr =
CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x8);
hw_caps->camera_capability = reg_value;
CAM_DBG(CAM_FD, "Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x",
hw_caps->camera_family, hw_caps->camera_version.major,
hw_caps->camera_version.minor, hw_caps->camera_version.incr,
hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
hw_caps->cpas_version.incr, hw_caps->camera_capability);
soc_info->hw_version = CAM_CPAS_TITAN_NONE;
if ((hw_caps->camera_version.major == 1) &&
(hw_caps->camera_version.minor == 7) &&
(hw_caps->camera_version.incr == 0)) {
if ((hw_caps->cpas_version.major == 1) &&
(hw_caps->cpas_version.minor == 0) &&
(hw_caps->cpas_version.incr == 0))
soc_info->hw_version = CAM_CPAS_TITAN_170_V100;
else if ((hw_caps->cpas_version.major == 1) &&
(hw_caps->cpas_version.minor == 1) &&
(hw_caps->cpas_version.incr == 0))
soc_info->hw_version = CAM_CPAS_TITAN_170_V110;
else if ((hw_caps->cpas_version.major == 1) &&
(hw_caps->cpas_version.minor == 2) &&
(hw_caps->cpas_version.incr == 0))
soc_info->hw_version = CAM_CPAS_TITAN_170_V120;
} else if ((hw_caps->camera_version.major == 1) &&
(hw_caps->camera_version.minor == 7) &&
(hw_caps->camera_version.incr == 5)) {
if ((hw_caps->cpas_version.major == 1) &&
(hw_caps->cpas_version.minor == 0) &&
(hw_caps->cpas_version.incr == 0))
soc_info->hw_version = CAM_CPAS_TITAN_175_V100;
else if ((hw_caps->cpas_version.major == 1) &&
(hw_caps->cpas_version.minor == 0) &&
(hw_caps->cpas_version.incr == 1))
soc_info->hw_version = CAM_CPAS_TITAN_175_V101;
} else if ((hw_caps->camera_version.major == 1) &&
(hw_caps->camera_version.minor == 5) &&
(hw_caps->camera_version.incr == 0)) {
if ((hw_caps->cpas_version.major == 1) &&
(hw_caps->cpas_version.minor == 0) &&
(hw_caps->cpas_version.incr == 0))
soc_info->hw_version = CAM_CPAS_TITAN_150_V100;
}
CAM_DBG(CAM_CPAS, "CPAS HW VERSION %x", soc_info->hw_version);
return 0;
}
/*
 * cam_cpastop_setup_regbase_indices() - record the mapped-region indices
 * of the mandatory "cam_cpas_top" and "cam_camnoc" memory blocks.
 *
 * Return: 0 on success, -EINVAL on bad arguments or a missing regbase.
 */
static int cam_cpastop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
	int32_t regbase_index[], int32_t num_reg_map)
{
	uint32_t index;
	int rc;

	if (num_reg_map > CAM_CPAS_REG_MAX) {
		CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
		return -EINVAL;
	}

	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
		CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
			soc_info->num_mem_block);
		return -EINVAL;
	}

	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
		soc_info->num_mem_block, "cam_cpas_top", &index);
	if (rc || (index >= num_reg_map)) {
		CAM_ERR(CAM_CPAS, "regbase not found for CPASTOP, rc=%d, %d %d",
			rc, index, num_reg_map);
		return -EINVAL;
	}
	regbase_index[CAM_CPAS_REG_CPASTOP] = index;

	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
		soc_info->num_mem_block, "cam_camnoc", &index);
	if (rc || (index >= num_reg_map)) {
		CAM_ERR(CAM_CPAS, "regbase not found for CAMNOC, rc=%d, %d %d",
			rc, index, num_reg_map);
		return -EINVAL;
	}
	regbase_index[CAM_CPAS_REG_CAMNOC] = index;

	return 0;
}
/*
 * cam_cpastop_handle_errlogger() - dump the CAMNOC slave error logger.
 *
 * Reads all errlog registers at the offsets given by the active
 * camnoc_info->err_logger layout into @slave_err, then prints them
 * (rate limited) with the decoded err_code string. Slave errors here
 * typically indicate an SMMU fault surfacing as a CAMNOC SLAVE_IRQ.
 *
 * Return: 0 on success, -EINVAL if this HW has no error logger.
 */
static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
	struct cam_hw_soc_info *soc_info,
	struct cam_camnoc_irq_slave_err_data *slave_err)
{
	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
	int err_code_index = 0;

	if (!camnoc_info->err_logger) {
		CAM_ERR_RATE_LIMIT(CAM_CPAS, "Invalid err logger info");
		return -EINVAL;
	}

	/* Capture the full errlog register set before decoding */
	slave_err->mainctrl.value = cam_io_r_mb(
		soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->err_logger->mainctrl);
	slave_err->errvld.value = cam_io_r_mb(
		soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->err_logger->errvld);
	slave_err->errlog0_low.value = cam_io_r_mb(
		soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->err_logger->errlog0_low);
	slave_err->errlog0_high.value = cam_io_r_mb(
		soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->err_logger->errlog0_high);
	slave_err->errlog1_low.value = cam_io_r_mb(
		soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->err_logger->errlog1_low);
	slave_err->errlog1_high.value = cam_io_r_mb(
		soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->err_logger->errlog1_high);
	slave_err->errlog2_low.value = cam_io_r_mb(
		soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->err_logger->errlog2_low);
	slave_err->errlog2_high.value = cam_io_r_mb(
		soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->err_logger->errlog2_high);
	slave_err->errlog3_low.value = cam_io_r_mb(
		soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->err_logger->errlog3_low);
	slave_err->errlog3_high.value = cam_io_r_mb(
		soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->err_logger->errlog3_high);

	CAM_ERR_RATE_LIMIT(CAM_CPAS,
		"Possible memory configuration issue, fault at SMMU raised as CAMNOC SLAVE_IRQ");

	CAM_ERR_RATE_LIMIT(CAM_CPAS,
		"mainctrl[0x%x 0x%x] errvld[0x%x 0x%x] stall_en=%d, fault_en=%d, err_vld=%d",
		camnoc_info->err_logger->mainctrl,
		slave_err->mainctrl.value,
		camnoc_info->err_logger->errvld,
		slave_err->errvld.value,
		slave_err->mainctrl.stall_en,
		slave_err->mainctrl.fault_en,
		slave_err->errvld.err_vld);

	/* Clamp err_code so it always indexes a valid string */
	err_code_index = slave_err->errlog0_low.err_code;
	if (err_code_index > CAMNOC_SLAVE_MAX_ERR_CODE)
		err_code_index = CAMNOC_SLAVE_MAX_ERR_CODE;

	CAM_ERR_RATE_LIMIT(CAM_CPAS,
		"errlog0 low[0x%x 0x%x] high[0x%x 0x%x] loginfo_vld=%d, word_error=%d, non_secure=%d, device=%d, opc=%d, err_code=%d(%s) sizef=%d, addr_space=%d, len1=%d",
		camnoc_info->err_logger->errlog0_low,
		slave_err->errlog0_low.value,
		camnoc_info->err_logger->errlog0_high,
		slave_err->errlog0_high.value,
		slave_err->errlog0_low.loginfo_vld,
		slave_err->errlog0_low.word_error,
		slave_err->errlog0_low.non_secure,
		slave_err->errlog0_low.device,
		slave_err->errlog0_low.opc,
		slave_err->errlog0_low.err_code,
		camnoc_salve_err_code[err_code_index],
		slave_err->errlog0_low.sizef,
		slave_err->errlog0_low.addr_space,
		slave_err->errlog0_high.len1);

	CAM_ERR_RATE_LIMIT(CAM_CPAS,
		"errlog1_low[0x%x 0x%x] errlog1_high[0x%x 0x%x] errlog2_low[0x%x 0x%x] errlog2_high[0x%x 0x%x] errlog3_low[0x%x 0x%x] errlog3_high[0x%x 0x%x]",
		camnoc_info->err_logger->errlog1_low,
		slave_err->errlog1_low.value,
		camnoc_info->err_logger->errlog1_high,
		slave_err->errlog1_high.value,
		camnoc_info->err_logger->errlog2_low,
		slave_err->errlog2_low.value,
		camnoc_info->err_logger->errlog2_high,
		slave_err->errlog2_high.value,
		camnoc_info->err_logger->errlog3_low,
		slave_err->errlog3_low.value,
		camnoc_info->err_logger->errlog3_high,
		slave_err->errlog3_high.value);

	return 0;
}
/*
 * cam_cpastop_handle_ubwc_enc_err() - snapshot the UBWC encoder error
 * status register for irq_err entry @i into @enc_err. Only logged at
 * debug level; interpretation is left to the notified clients.
 */
static int cam_cpastop_handle_ubwc_enc_err(struct cam_cpas *cpas_core,
	struct cam_hw_soc_info *soc_info, int i,
	struct cam_camnoc_irq_ubwc_enc_data *enc_err)
{
	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];

	enc_err->encerr_status.value =
		cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->irq_err[i].err_status.offset);

	/* Let clients handle the UBWC errors */
	CAM_DBG(CAM_CPAS,
		"ubwc enc err [%d]: offset[0x%x] value[0x%x]",
		i, camnoc_info->irq_err[i].err_status.offset,
		enc_err->encerr_status.value);

	return 0;
}
/*
 * cam_cpastop_handle_ubwc_dec_err() - snapshot the UBWC decoder error
 * status register for irq_err entry @i into @dec_err and log the decoded
 * error bits at debug level. Interpretation is left to the clients.
 */
static int cam_cpastop_handle_ubwc_dec_err(struct cam_cpas *cpas_core,
	struct cam_hw_soc_info *soc_info, int i,
	struct cam_camnoc_irq_ubwc_dec_data *dec_err)
{
	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];

	dec_err->decerr_status.value =
		cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->irq_err[i].err_status.offset);

	/* Let clients handle the UBWC errors */
	CAM_DBG(CAM_CPAS,
		"ubwc dec err status [%d]: offset[0x%x] value[0x%x] thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
		i, camnoc_info->irq_err[i].err_status.offset,
		dec_err->decerr_status.value,
		dec_err->decerr_status.thr_err,
		dec_err->decerr_status.fcl_err,
		dec_err->decerr_status.len_md_err,
		dec_err->decerr_status.format_err);

	return 0;
}
/*
 * cam_cpastop_handle_ahb_timeout_err() - AHB timeout IRQ handler.
 * No status registers are read for this error; it is only logged
 * (rate limited). @ahb_err is currently left untouched.
 */
static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw,
	struct cam_camnoc_irq_ahb_timeout_data *ahb_err)
{
	CAM_ERR_RATE_LIMIT(CAM_CPAS, "ahb timeout error");

	return 0;
}
/*
 * cam_cpastop_disable_test_irq() - strip the test IRQ from the SBM
 * clear/enable masks and disable its irq_err entry, so the test
 * interrupt never fires outside of test builds.
 * NOTE(review): bit 0x4 (clear) and 0x100 (enable) are presumed to be
 * the test-IRQ bits per the active camnoc register layout - confirm
 * against the cpastop register headers.
 */
static int cam_cpastop_disable_test_irq(struct cam_hw_info *cpas_hw)
{
	camnoc_info->irq_sbm->sbm_clear.value &= ~0x4;
	camnoc_info->irq_sbm->sbm_enable.value &= ~0x100;
	camnoc_info->irq_err[CAM_CAMNOC_HW_IRQ_CAMNOC_TEST].enable = false;

	return 0;
}
/*
 * cam_cpastop_reset_irq() - acknowledge all latched CAMNOC IRQ state,
 * then re-enable the SBM and every enabled per-error IRQ source.
 */
static int cam_cpastop_reset_irq(struct cam_hw_info *cpas_hw)
{
	int idx;

	/* Ack any pending status before (re)arming */
	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
		&camnoc_info->irq_sbm->sbm_clear);
	for (idx = 0; idx < camnoc_info->irq_err_size; idx++) {
		if (camnoc_info->irq_err[idx].enable)
			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
				&camnoc_info->irq_err[idx].err_clear);
	}

	/* Now turn the SBM and each individual error IRQ back on */
	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
		&camnoc_info->irq_sbm->sbm_enable);
	for (idx = 0; idx < camnoc_info->irq_err_size; idx++) {
		if (camnoc_info->irq_err[idx].enable)
			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
				&camnoc_info->irq_err[idx].err_enable);
	}

	return 0;
}
/*
 * cam_cpastop_notify_clients() - fan an IRQ event out to CPAS clients.
 *
 * Only started clients with a registered callback are notified; the
 * first client that reports the error as handled stops the fan-out.
 */
static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core,
	struct cam_cpas_irq_data *irq_data)
{
	struct cam_cpas_client *client;
	bool handled = false;
	int idx;

	CAM_DBG(CAM_CPAS,
		"Notify CB : num_clients=%d, registered=%d, started=%d",
		cpas_core->num_clients, cpas_core->registered_clients,
		cpas_core->streamon_clients);

	for (idx = 0; idx < cpas_core->num_clients; idx++) {
		if (!CAM_CPAS_CLIENT_STARTED(cpas_core, idx))
			continue;

		client = cpas_core->cpas_client[idx];
		if (!client->data.cam_cpas_client_cb)
			continue;

		CAM_DBG(CAM_CPAS, "Calling client CB %d : %d",
			idx, irq_data->irq_type);
		handled = client->data.cam_cpas_client_cb(
			client->data.client_handle,
			client->data.userdata,
			irq_data);
		if (handled)
			break;
	}
}
/*
 * cam_cpastop_work() - bottom half for CAMNOC IRQs.
 *
 * Decodes each pending irq_err bit from payload->irq_status, gathers
 * the matching error data and notifies the CPAS clients. The payload
 * was allocated in cam_cpastop_handle_irq() and must be freed on every
 * exit path of this function (previously it leaked when CPAS was
 * already off).
 */
static void cam_cpastop_work(struct work_struct *work)
{
	struct cam_cpas_work_payload *payload;
	struct cam_hw_info *cpas_hw;
	struct cam_cpas *cpas_core;
	struct cam_hw_soc_info *soc_info;
	int i;
	enum cam_camnoc_hw_irq_type irq_type;
	struct cam_cpas_irq_data irq_data;

	payload = container_of(work, struct cam_cpas_work_payload, work);
	if (!payload) {
		CAM_ERR(CAM_CPAS, "NULL payload");
		return;
	}

	cpas_hw = payload->hw;
	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
	soc_info = &cpas_hw->soc_info;

	/* irq_count of zero means CPAS was powered off; drop the event */
	if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
		CAM_ERR(CAM_CPAS, "CPAS off");
		/* Fix: payload was leaked on this early return */
		kfree(payload);
		return;
	}

	for (i = 0; i < camnoc_info->irq_err_size; i++) {
		if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
			(camnoc_info->irq_err[i].enable)) {
			irq_type = camnoc_info->irq_err[i].irq_type;
			CAM_ERR_RATE_LIMIT(CAM_CPAS,
				"Error occurred, type=%d", irq_type);
			memset(&irq_data, 0x0, sizeof(irq_data));
			irq_data.irq_type = (enum cam_camnoc_irq_type)irq_type;

			switch (irq_type) {
			case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
				cam_cpastop_handle_errlogger(
					cpas_core, soc_info,
					&irq_data.u.slave_err);
				break;
			case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
			case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
				cam_cpastop_handle_ubwc_enc_err(
					cpas_core, soc_info, i,
					&irq_data.u.enc_err);
				break;
			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
				cam_cpastop_handle_ubwc_dec_err(
					cpas_core, soc_info, i,
					&irq_data.u.dec_err);
				break;
			case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
				cam_cpastop_handle_ahb_timeout_err(
					cpas_hw, &irq_data.u.ahb_err);
				break;
			case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
				CAM_DBG(CAM_CPAS, "TEST IRQ");
				break;
			default:
				CAM_ERR(CAM_CPAS, "Invalid IRQ type");
				break;
			}

			cam_cpastop_notify_clients(cpas_core, &irq_data);

			/* Mark this source handled */
			payload->irq_status &=
				~camnoc_info->irq_err[i].sbm_port;
		}
	}
	atomic_dec(&cpas_core->irq_count);
	wake_up(&cpas_core->irq_count_wq);
	CAM_DBG(CAM_CPAS, "irq_count=%d\n", atomic_read(&cpas_core->irq_count));

	if (payload->irq_status)
		CAM_ERR(CAM_CPAS, "IRQ not handled irq_status=0x%x",
			payload->irq_status);

	kfree(payload);
}
/*
 * cam_cpastop_handle_irq() - CAMNOC hard-IRQ handler (top half).
 *
 * Snapshots the SBM status into a GFP_ATOMIC payload, re-arms the IRQ
 * registers, and defers decoding/notification to cam_cpastop_work() on
 * the CPAS workqueue. The irq_count reference taken here pairs with the
 * wake_up below so power-off can wait for in-flight IRQ handling.
 */
static irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
{
	struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
	struct cam_cpas_work_payload *payload;

	/* irq_count of zero means CPAS is powered off; ignore the IRQ */
	if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
		CAM_ERR(CAM_CPAS, "CPAS off");
		return IRQ_HANDLED;
	}

	/* IRQ context: allocation must not sleep */
	payload = kzalloc(sizeof(struct cam_cpas_work_payload), GFP_ATOMIC);
	if (!payload)
		goto done;

	payload->irq_status = cam_io_r_mb(
		soc_info->reg_map[camnoc_index].mem_base +
		camnoc_info->irq_sbm->sbm_status.offset);

	CAM_DBG(CAM_CPAS, "IRQ callback, irq_status=0x%x", payload->irq_status);

	payload->hw = cpas_hw;
	INIT_WORK((struct work_struct *)&payload->work, cam_cpastop_work);

	if (TEST_IRQ_ENABLE)
		cam_cpastop_disable_test_irq(cpas_hw);

	/* Ack and re-arm before handing off to the bottom half */
	cam_cpastop_reset_irq(cpas_hw);

	queue_work(cpas_core->work_queue, &payload->work);
done:
	atomic_dec(&cpas_core->irq_count);
	wake_up(&cpas_core->irq_count_wq);

	return IRQ_HANDLED;
}
static int cam_cpastop_poweron(struct cam_hw_info *cpas_hw)
{
int i;
cam_cpastop_reset_irq(cpas_hw);
for (i = 0; i < camnoc_info->specific_size; i++) {
if (camnoc_info->specific[i].enable) {
cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
&camnoc_info->specific[i].priority_lut_low);
cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
&camnoc_info->specific[i].priority_lut_high);
cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
&camnoc_info->specific[i].urgency);
cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
&camnoc_info->specific[i].danger_lut);
cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
&camnoc_info->specific[i].safe_lut);
cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
&camnoc_info->specific[i].ubwc_ctl);
cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
&camnoc_info->specific[i].flag_out_set0_low);
}
}
return 0;
}
/*
 * cam_cpastop_poweroff() - apply pre-power-off errata workarounds.
 *
 * If the active HW revision requires it, polls the CAMNOC flush
 * register until pending slave transactions drain (bounded by
 * CAM_CPAS_POLL_RETRY_CNT). A poll timeout is logged at debug level
 * but deliberately not treated as fatal, so power-off proceeds.
 */
static int cam_cpastop_poweroff(struct cam_hw_info *cpas_hw)
{
	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
	int rc = 0;
	struct cam_cpas_hw_errata_wa_list *errata_wa_list =
		camnoc_info->errata_wa_list;

	if (!errata_wa_list)
		return 0;

	if (errata_wa_list->camnoc_flush_slave_pending_trans.enable) {
		struct cam_cpas_hw_errata_wa *errata_wa =
			&errata_wa_list->camnoc_flush_slave_pending_trans;

		rc = cam_io_poll_value_wmask(
			soc_info->reg_map[camnoc_index].mem_base +
			errata_wa->data.reg_info.offset,
			errata_wa->data.reg_info.value,
			errata_wa->data.reg_info.mask,
			CAM_CPAS_POLL_RETRY_CNT,
			CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
		if (rc) {
			CAM_DBG(CAM_CPAS,
				"camnoc flush slave pending trans failed");
			/* Do not return error, passthrough */
			rc = 0;
		}
	}

	return rc;
}
/*
 * cam_cpastop_init_hw_version() - select the CAMNOC register layout
 * matching soc_info->hw_version (derived in cam_cpastop_get_hw_info()).
 *
 * Return: 0 on success, -EINVAL for an unsupported HW version.
 * Previously this always returned 0, silently discarding the -EINVAL
 * set in the default case and leaving camnoc_info unset.
 */
static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
	struct cam_cpas_hw_caps *hw_caps)
{
	int rc = 0;
	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;

	CAM_DBG(CAM_CPAS,
		"hw_version=0x%x Camera Version %d.%d.%d, cpas version %d.%d.%d",
		soc_info->hw_version,
		hw_caps->camera_version.major,
		hw_caps->camera_version.minor,
		hw_caps->camera_version.incr,
		hw_caps->cpas_version.major,
		hw_caps->cpas_version.minor,
		hw_caps->cpas_version.incr);

	switch (soc_info->hw_version) {
	case CAM_CPAS_TITAN_170_V100:
		camnoc_info = &cam170_cpas100_camnoc_info;
		break;
	case CAM_CPAS_TITAN_170_V110:
		camnoc_info = &cam170_cpas110_camnoc_info;
		break;
	case CAM_CPAS_TITAN_175_V100:
		camnoc_info = &cam175_cpas100_camnoc_info;
		break;
	case CAM_CPAS_TITAN_175_V101:
		camnoc_info = &cam175_cpas101_camnoc_info;
		break;
	case CAM_CPAS_TITAN_150_V100:
		camnoc_info = &cam150_cpas100_camnoc_info;
		break;
	default:
		CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d",
			hw_caps->camera_version.major,
			hw_caps->camera_version.minor,
			hw_caps->camera_version.incr);
		rc = -EINVAL;
		break;
	}

	return rc;
}
/*
 * cam_cpastop_get_internal_ops() - populate the internal ops vtable for
 * the CPAS-top (Titan) variant; all hooks are implemented here.
 *
 * Return: 0 on success, -EINVAL if @internal_ops is NULL.
 */
int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
{
	if (!internal_ops) {
		CAM_ERR(CAM_CPAS, "invalid NULL param");
		return -EINVAL;
	}

	internal_ops->get_hw_info = cam_cpastop_get_hw_info;
	internal_ops->init_hw_version = cam_cpastop_init_hw_version;
	internal_ops->handle_irq = cam_cpastop_handle_irq;
	internal_ops->setup_regbase = cam_cpastop_setup_regbase_indices;
	internal_ops->power_on = cam_cpastop_poweron;
	internal_ops->power_off = cam_cpastop_poweroff;
	return 0;
}

View file

@ -0,0 +1,234 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CPASTOP_HW_H_
#define _CAM_CPASTOP_HW_H_
#include "cam_cpas_api.h"
#include "cam_cpas_hw.h"
/**
 * enum cam_camnoc_hw_irq_type - Enum for camnoc error types
 *
 * Note: the first six entries are aliased to the corresponding
 * CAM_CAMNOC_IRQ_* values from cam_cpas_api.h so HW and API IRQ
 * identifiers stay numerically identical; the reserved/test entries
 * continue the numbering and have no API counterpart.
 *
 * @CAM_CAMNOC_HW_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
 *         1 QHB port) has an error logger. The error observed at any slave
 *         port is logged into the error logger register and an IRQ is
 *         triggered
 * @CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR: Triggered if any error
 *         detected in the IFE0 UBWC encoder instance
 * @CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR: Triggered if any error
 *         detected in the IFE1 or IFE3 UBWC encoder instance
 * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error
 *         detected in the IPE/BPS UBWC decoder instance
 * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error
 *         detected in the IPE/BPS UBWC encoder instance
 * @CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT: Triggered when the QHS_ICP slave times
 *         out after 4000 AHB cycles
 * @CAM_CAMNOC_HW_IRQ_RESERVED1: Reserved
 * @CAM_CAMNOC_HW_IRQ_RESERVED2: Reserved
 * @CAM_CAMNOC_HW_IRQ_CAMNOC_TEST: To test the IRQ logic
 */
enum cam_camnoc_hw_irq_type {
	CAM_CAMNOC_HW_IRQ_SLAVE_ERROR =
		CAM_CAMNOC_IRQ_SLAVE_ERROR,
	CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR =
		CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
	CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR =
		CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR =
		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR =
		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
	CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT =
		CAM_CAMNOC_IRQ_AHB_TIMEOUT,
	CAM_CAMNOC_HW_IRQ_RESERVED1,
	CAM_CAMNOC_HW_IRQ_RESERVED2,
	CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
};
/**
 * enum cam_camnoc_port_type - Enum for different camnoc hw ports. All CAMNOC
 *         settings like QoS, LUT mappings need to be configured for
 *         each of these ports. Used as the lookup key in the per-chipset
 *         cam_camnoc_specific settings tables.
 *
 * @CAM_CAMNOC_CDM: Indicates CDM HW connection to camnoc
 * @CAM_CAMNOC_IFE02: Indicates IFE0, IFE2 HW connection to camnoc
 * @CAM_CAMNOC_IFE13: Indicates IFE1, IFE3 HW connection to camnoc
 * @CAM_CAMNOC_IPE_BPS_LRME_READ: Indicates IPE, BPS, LRME Read HW
 *         connection to camnoc
 * @CAM_CAMNOC_IPE_BPS_LRME_WRITE: Indicates IPE, BPS, LRME Write HW
 *         connection to camnoc
 * @CAM_CAMNOC_JPEG: Indicates JPEG HW connection to camnoc
 * @CAM_CAMNOC_FD: Indicates FD HW connection to camnoc
 * @CAM_CAMNOC_ICP: Indicates ICP HW connection to camnoc
 */
enum cam_camnoc_port_type {
	CAM_CAMNOC_CDM,
	CAM_CAMNOC_IFE02,
	CAM_CAMNOC_IFE13,
	CAM_CAMNOC_IPE_BPS_LRME_READ,
	CAM_CAMNOC_IPE_BPS_LRME_WRITE,
	CAM_CAMNOC_JPEG,
	CAM_CAMNOC_FD,
	CAM_CAMNOC_ICP,
};
/**
 * struct cam_camnoc_specific : CPAS camnoc specific settings
 *
 * @port_type: Port type
 * @enable: Whether to enable settings for this connection
 * @priority_lut_low: Priority Low LUT mapping for this connection
 * @priority_lut_high: Priority High LUT mapping for this connection
 * @urgency: Urgency (QoS) settings for this connection
 * @danger_lut: Danger LUT mapping for this connection
 * @safe_lut: Safe LUT mapping for this connection
 * @ubwc_ctl: UBWC control settings for this connection
 * @flag_out_set0_low: SBM FlagOutSet0 register setting for this connection
 *         (only populated for ports that need it, e.g. ICP on some
 *         chipsets — see the per-chipset settings tables)
 *
 */
struct cam_camnoc_specific {
	enum cam_camnoc_port_type port_type;
	bool enable;
	struct cam_cpas_reg priority_lut_low;
	struct cam_cpas_reg priority_lut_high;
	struct cam_cpas_reg urgency;
	struct cam_cpas_reg danger_lut;
	struct cam_cpas_reg safe_lut;
	struct cam_cpas_reg ubwc_ctl;
	struct cam_cpas_reg flag_out_set0_low;
};
/**
 * struct cam_camnoc_irq_sbm : Sideband manager settings for all CAMNOC IRQs.
 *         Each field is a register access descriptor (cam_cpas_reg).
 *
 * @sbm_enable: SBM settings for IRQ enable
 * @sbm_status: SBM settings for IRQ status
 * @sbm_clear: SBM settings for IRQ clear
 *
 */
struct cam_camnoc_irq_sbm {
	struct cam_cpas_reg sbm_enable;
	struct cam_cpas_reg sbm_status;
	struct cam_cpas_reg sbm_clear;
};
/**
 * struct cam_camnoc_irq_err : Error settings specific to each CAMNOC IRQ
 *
 * @irq_type: Type of IRQ
 * @enable: Whether to enable error settings for this IRQ
 * @sbm_port: Corresponding SBM port for this IRQ (bit mask within the SBM
 *         fault-in status register, per the settings tables)
 * @err_enable: Error enable settings for this IRQ
 * @err_status: Error status settings for this IRQ
 * @err_clear: Error clear settings for this IRQ
 *
 */
struct cam_camnoc_irq_err {
	enum cam_camnoc_hw_irq_type irq_type;
	bool enable;
	uint32_t sbm_port;
	struct cam_cpas_reg err_enable;
	struct cam_cpas_reg err_status;
	struct cam_cpas_reg err_clear;
};
/**
 * struct cam_cpas_hw_errata_wa : Struct for HW errata workaround info
 *
 * @enable: Whether to enable this errata workaround
 * @data: HW Errata workaround data (currently only a register access
 *         descriptor; kept in a union for future workaround types)
 *
 */
struct cam_cpas_hw_errata_wa {
	bool enable;
	union {
		struct cam_cpas_reg reg_info;
	} data;
};
/**
 * struct cam_cpas_hw_errata_wa_list : List of HW Errata workaround info
 *
 * @camnoc_flush_slave_pending_trans: Errata workaround info for flushing
 *         camnoc slave pending transactions before turning off the
 *         CPAS_TOP gdsc
 *
 */
struct cam_cpas_hw_errata_wa_list {
	struct cam_cpas_hw_errata_wa camnoc_flush_slave_pending_trans;
};
/**
 * struct cam_camnoc_err_logger_info : CAMNOC error logger register offsets.
 *         Offsets are chipset specific; see the per-chipset tables.
 *
 * @mainctrl: Register offset for mainctrl
 * @errvld: Register offset for errvld
 * @errlog0_low: Register offset for errlog0_low
 * @errlog0_high: Register offset for errlog0_high
 * @errlog1_low: Register offset for errlog1_low
 * @errlog1_high: Register offset for errlog1_high
 * @errlog2_low: Register offset for errlog2_low
 * @errlog2_high: Register offset for errlog2_high
 * @errlog3_low: Register offset for errlog3_low
 * @errlog3_high: Register offset for errlog3_high
 *
 */
struct cam_camnoc_err_logger_info {
	uint32_t mainctrl;
	uint32_t errvld;
	uint32_t errlog0_low;
	uint32_t errlog0_high;
	uint32_t errlog1_low;
	uint32_t errlog1_high;
	uint32_t errlog2_low;
	uint32_t errlog2_high;
	uint32_t errlog3_low;
	uint32_t errlog3_high;
};
/**
 * struct cam_camnoc_info : Overall CAMNOC settings info
 *
 * @specific: Pointer to CAMNOC port-specific (cam_camnoc_specific) settings
 * @specific_size: Array size of the port-specific settings
 * @irq_sbm: Pointer to CAMNOC IRQ SBM settings
 * @irq_err: Pointer to CAMNOC IRQ Error settings
 * @irq_err_size: Array size of IRQ Error settings
 * @err_logger: Pointer to CAMNOC IRQ Error logger read registers
 * @errata_wa_list: HW Errata workaround info
 *
 */
struct cam_camnoc_info {
	struct cam_camnoc_specific *specific;
	int specific_size;
	struct cam_camnoc_irq_sbm *irq_sbm;
	struct cam_camnoc_irq_err *irq_err;
	int irq_err_size;
	struct cam_camnoc_err_logger_info *err_logger;
	struct cam_cpas_hw_errata_wa_list *errata_wa_list;
};
/**
 * struct cam_cpas_work_payload : Struct for cpas work payload data,
 *         carrying IRQ context into deferred (workqueue) processing.
 *
 * @hw: Pointer to HW info
 * @irq_status: IRQ status value
 * @irq_data: IRQ data
 * @work: Work handle
 *
 */
struct cam_cpas_work_payload {
	struct cam_hw_info *hw;
	uint32_t irq_status;
	uint32_t irq_data;
	struct work_struct work;
};
#endif /* _CAM_CPASTOP_HW_H_ */

View file

@ -0,0 +1,531 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CPASTOP100_H_
#define _CPASTOP100_H_
#define TEST_IRQ_ENABLE 0
/* Sideband manager enable/status/clear settings for CAMNOC IRQs (cpastop v100) */
static struct cam_camnoc_irq_sbm cam_cpas100_irq_sbm = {
	.sbm_enable = {
		.access_type = CAM_REG_TYPE_READ_WRITE,
		.enable = true,
		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
			(TEST_IRQ_ENABLE ?
			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
			0x0),
	},
	.sbm_status = {
		.access_type = CAM_REG_TYPE_READ,
		.enable = true,
		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
	},
	.sbm_clear = {
		.access_type = CAM_REG_TYPE_WRITE,
		.enable = true,
		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
	}
};
/*
 * Per-IRQ error enable/status/clear register settings (cpastop v100).
 * sbm_port is the bit in SBM_FAULTINSTATUS0_LOW corresponding to each IRQ.
 */
static struct cam_camnoc_irq_err
	cam_cpas100_irq_err[] = {
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
		.enable = true,
		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
			.value = 1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
			.value = 1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
		.enable = true,
		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
			.value = 1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
			.value = 1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
		.enable = true,
		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
			.value = 1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
			.value = 1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
		.enable = true,
		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
			.value = 1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
			.value = 1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
		.enable = true,
		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
			.value = 1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x1190,
			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
			.value = 1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
		.enable = true,
		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
			.value = 0x1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
		},
		.err_clear = {
			.enable = false, /* no clear register for this IRQ */
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
		.enable = false,
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
		.enable = false,
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
		.enable = TEST_IRQ_ENABLE ? true : false,
		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
			.value = 0x5,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
		},
		.err_clear = {
			.enable = false, /* no clear register for this IRQ */
		},
	},
};
/*
 * Per-port CAMNOC QoS settings (cpastop v100): priority LUTs, urgency,
 * danger/safe LUTs and UBWC control for each camnoc port.
 */
static struct cam_camnoc_specific
	cam_cpas100_camnoc_specific[] = {
	{
		.port_type = CAM_CAMNOC_CDM,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
			.value = 0x22222222,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
			.value = 0x22222222,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 1,
			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
			.value = 0,
		},
		.danger_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
			.value = 0x0,
		},
		.safe_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
			.value = 0x0,
		},
		.ubwc_ctl = {
			.enable = false, /* CDM port has no UBWC */
		},
	},
	{
		.port_type = CAM_CAMNOC_IFE02,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
			.value = 0x66665433,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
			.value = 0x66666666,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 1,
			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
			.mask = 0x70,
			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
			.shift = 0x4,
			.value = 3,
		},
		.danger_lut = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
			.value = 0xFFFFFF00,
		},
		.safe_lut = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
			.value = 0x3,
		},
		.ubwc_ctl = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
			.value = 1,
		},
	},
	{
		.port_type = CAM_CAMNOC_IFE13,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
			.value = 0x66665433,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
			.value = 0x66666666,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 1,
			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
			.mask = 0x70,
			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
			.shift = 0x4,
			.value = 3,
		},
		.danger_lut = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
			.value = 0xFFFFFF00,
		},
		.safe_lut = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
			.value = 0x3,
		},
		.ubwc_ctl = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
			.value = 1,
		},
	},
	{
		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
			.value = 0x33333333,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
			.value = 0x33333333,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 1,
			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
			.mask = 0x7,
			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
			.shift = 0x0,
			.value = 3,
		},
		.danger_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
			.value = 0x0,
		},
		.safe_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
			.value = 0x0,
		},
		.ubwc_ctl = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
			.value = 1,
		},
	},
	{
		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
			.value = 0x33333333,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
			.value = 0x33333333,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 1,
			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
			.mask = 0x70,
			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
			.shift = 0x4,
			.value = 3,
		},
		.danger_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
			.value = 0x0,
		},
		.safe_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
			.value = 0x0,
		},
		.ubwc_ctl = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
			.value = 1,
		},
	},
	{
		.port_type = CAM_CAMNOC_JPEG,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
			.value = 0x22222222,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
			.value = 0x22222222,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
			.value = 0x22,
		},
		.danger_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
			.value = 0x0,
		},
		.safe_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
			.value = 0x0,
		},
		.ubwc_ctl = {
			.enable = false, /* JPEG port has no UBWC */
		},
	},
	{
		.port_type = CAM_CAMNOC_FD,
		.enable = false,
	},
	{
		.port_type = CAM_CAMNOC_ICP,
		.enable = false,
	}
};
/* CAMNOC error logger register offsets (cpas v170 hw v1.0.0) */
static struct cam_camnoc_err_logger_info cam170_cpas100_err_logger_offsets = {
	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
};
/*
 * HW errata workarounds (cpas v170 hw v1.0.0): poll SenseIn0 bits 17-19
 * until camnoc slave pending transactions drain before gdsc power-off.
 */
static struct cam_cpas_hw_errata_wa_list cam170_cpas100_errata_wa_list = {
	.camnoc_flush_slave_pending_trans = {
		.enable = true,
		.data.reg_info = {
			.access_type = CAM_REG_TYPE_READ,
			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
			.mask = 0xE0000, /* Bits 17, 18, 19 */
			.value = 0, /* expected to be 0 */
		},
	},
};
/* Aggregate camnoc settings descriptor for cpas v170 hw v1.0.0 */
struct cam_camnoc_info cam170_cpas100_camnoc_info = {
	.specific = &cam_cpas100_camnoc_specific[0],
	.specific_size = sizeof(cam_cpas100_camnoc_specific) /
		sizeof(cam_cpas100_camnoc_specific[0]),
	.irq_sbm = &cam_cpas100_irq_sbm,
	.irq_err = &cam_cpas100_irq_err[0],
	.irq_err_size = sizeof(cam_cpas100_irq_err) /
		sizeof(cam_cpas100_irq_err[0]),
	.err_logger = &cam170_cpas100_err_logger_offsets,
	.errata_wa_list = &cam170_cpas100_errata_wa_list,
};
#endif /* _CPASTOP100_H_ */

View file

@ -0,0 +1,530 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CPASTOP_V150_100_H_
#define _CPASTOP_V150_100_H_
#define TEST_IRQ_ENABLE 0
/* Sideband manager enable/status/clear settings for CAMNOC IRQs (cpas v150 hw v1.0.0) */
static struct cam_camnoc_irq_sbm cam_cpas_v150_100_irq_sbm = {
	.sbm_enable = {
		.access_type = CAM_REG_TYPE_READ_WRITE,
		.enable = true,
		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
			(TEST_IRQ_ENABLE ?
			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
			0x0),
	},
	.sbm_status = {
		.access_type = CAM_REG_TYPE_READ,
		.enable = true,
		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
	},
	.sbm_clear = {
		.access_type = CAM_REG_TYPE_WRITE,
		.enable = true,
		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
	}
};
/*
 * Per-IRQ error enable/status/clear register settings (cpas v150 hw v1.0.0).
 * sbm_port is the bit in SBM_FAULTINSTATUS0_LOW corresponding to each IRQ.
 */
static struct cam_camnoc_irq_err
	cam_cpas_v150_100_irq_err[] = {
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
		.enable = true,
		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
			.value = 1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
			.value = 1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
		.enable = true,
		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
			.value = 1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
			.value = 1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
		.enable = true,
		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
			.value = 1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
			.value = 1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
		.enable = true,
		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
			.value = 1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
			.value = 1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
		.enable = true,
		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
			.value = 1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x1190,
			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
		},
		.err_clear = {
			.access_type = CAM_REG_TYPE_WRITE,
			.enable = true,
			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
			.value = 1,
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
		.enable = true,
		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
			.value = 0x1,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
		},
		.err_clear = {
			.enable = false, /* no clear register for this IRQ */
		},
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
		.enable = false,
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
		.enable = false,
	},
	{
		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
		.enable = TEST_IRQ_ENABLE ? true : false,
		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
		.err_enable = {
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.enable = true,
			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
			.value = 0x5,
		},
		.err_status = {
			.access_type = CAM_REG_TYPE_READ,
			.enable = true,
			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
		},
		.err_clear = {
			.enable = false, /* no clear register for this IRQ */
		},
	},
};
/*
 * Per-port CAMNOC QoS settings (cpas v150 hw v1.0.0). Differs from the
 * v170 table mainly in CDM urgency value, safe LUT values, UBWC control
 * (disabled here) and an enabled ICP port with a flag-out set register.
 */
static struct cam_camnoc_specific
	cam_cpas_v150_100_camnoc_specific[] = {
	{
		.port_type = CAM_CAMNOC_CDM,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
			.value = 0x22222222,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
			.value = 0x22222222,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 1,
			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
			.value = 0x2,
		},
		.danger_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
			.value = 0x0,
		},
		.safe_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
			.value = 0x0,
		},
		.ubwc_ctl = {
			.enable = false, /* CDM port has no UBWC */
		},
	},
	{
		.port_type = CAM_CAMNOC_IFE02,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
			.value = 0x66665433,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
			.value = 0x66666666,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 1,
			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
			.mask = 0x70,
			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
			.shift = 0x4,
			.value = 3,
		},
		.danger_lut = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
			.value = 0xFFFFFF00,
		},
		.safe_lut = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
			.value = 0x1,
		},
		.ubwc_ctl = {
			.enable = false, /* UBWC control not programmed on v150 */
		},
	},
	{
		.port_type = CAM_CAMNOC_IFE13,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
			.value = 0x66665433,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
			.value = 0x66666666,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 1,
			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
			.mask = 0x70,
			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
			.shift = 0x4,
			.value = 3,
		},
		.danger_lut = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
			.value = 0xFFFFFF00,
		},
		.safe_lut = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
			.value = 0x1,
		},
		.ubwc_ctl = {
			.enable = false, /* UBWC control not programmed on v150 */
		},
	},
	{
		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
			.value = 0x33333333,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
			.value = 0x33333333,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 1,
			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
			.mask = 0x7,
			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
			.shift = 0x0,
			.value = 3,
		},
		.danger_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
			.value = 0x0,
		},
		.safe_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
			.value = 0x0,
		},
		.ubwc_ctl = {
			.enable = false, /* UBWC control not programmed on v150 */
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
			.value = 1,
		},
	},
	{
		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
			.value = 0x33333333,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
			.value = 0x33333333,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 1,
			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
			.mask = 0x70,
			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
			.shift = 0x4,
			.value = 3,
		},
		.danger_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
			.value = 0x0,
		},
		.safe_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
			.value = 0x0,
		},
		.ubwc_ctl = {
			.enable = false, /* UBWC control not programmed on v150 */
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
			.value = 0x5,
		},
	},
	{
		.port_type = CAM_CAMNOC_JPEG,
		.enable = true,
		.priority_lut_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
			.value = 0x22222222,
		},
		.priority_lut_high = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
			.value = 0x22222222,
		},
		.urgency = {
			.enable = true,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
			.value = 0x22,
		},
		.danger_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
			.value = 0x0,
		},
		.safe_lut = {
			.enable = false,
			.access_type = CAM_REG_TYPE_READ_WRITE,
			.masked_value = 0,
			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
			.value = 0x0,
		},
		.ubwc_ctl = {
			.enable = false, /* JPEG port has no UBWC */
		},
	},
	{
		.port_type = CAM_CAMNOC_FD,
		.enable = false,
	},
	{
		.port_type = CAM_CAMNOC_ICP,
		.enable = true,
		.flag_out_set0_low = {
			.enable = true,
			.access_type = CAM_REG_TYPE_WRITE,
			.masked_value = 0,
			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
			.value = 0x100000,
		},
	},
};
/* CAMNOC error logger register offsets (cpas v150 hw v1.0.0) */
static struct cam_camnoc_err_logger_info cam150_cpas100_err_logger_offsets = {
	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
};
/*
 * HW errata workarounds (cpas v150 hw v1.0.0): the camnoc slave flush
 * workaround is not required on this version (enable = false); the
 * register data is retained for reference.
 */
static struct cam_cpas_hw_errata_wa_list cam150_cpas100_errata_wa_list = {
	.camnoc_flush_slave_pending_trans = {
		.enable = false,
		.data.reg_info = {
			.access_type = CAM_REG_TYPE_READ,
			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
			.mask = 0xE0000, /* Bits 17, 18, 19 */
			.value = 0, /* expected to be 0 */
		},
	},
};
/* Aggregate camnoc settings descriptor for cpas v150 hw v1.0.0 */
static struct cam_camnoc_info cam150_cpas100_camnoc_info = {
	.specific = &cam_cpas_v150_100_camnoc_specific[0],
	.specific_size = sizeof(cam_cpas_v150_100_camnoc_specific) /
		sizeof(cam_cpas_v150_100_camnoc_specific[0]),
	.irq_sbm = &cam_cpas_v150_100_irq_sbm,
	.irq_err = &cam_cpas_v150_100_irq_err[0],
	.irq_err_size = sizeof(cam_cpas_v150_100_irq_err) /
		sizeof(cam_cpas_v150_100_irq_err[0]),
	.err_logger = &cam150_cpas100_err_logger_offsets,
	.errata_wa_list = &cam150_cpas100_errata_wa_list,
};
#endif /* _CPASTOP_V150_100_H_ */

View file

@ -0,0 +1,538 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CPASTOP_V170_110_H_
#define _CPASTOP_V170_110_H_
#define TEST_IRQ_ENABLE 0
/* Sideband manager enable/status/clear settings for CAMNOC IRQs (cpas v170 hw v1.1.0) */
static struct cam_camnoc_irq_sbm cam_cpas110_irq_sbm = {
	.sbm_enable = {
		.access_type = CAM_REG_TYPE_READ_WRITE,
		.enable = true,
		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
			(TEST_IRQ_ENABLE ?
			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
			0x0),
	},
	.sbm_status = {
		.access_type = CAM_REG_TYPE_READ,
		.enable = true,
		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
	},
	.sbm_clear = {
		.access_type = CAM_REG_TYPE_WRITE,
		.enable = true,
		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
	}
};
/*
 * CAMNOC error IRQ table for CPAS v170 1.1.0. Each entry maps a HW IRQ
 * type to its SBM fault-in port mask and the enable/status/clear
 * register offsets used to service it. RESERVED entries are disabled;
 * the CAMNOC_TEST entry is only enabled when TEST_IRQ_ENABLE is set.
 */
static struct cam_camnoc_irq_err
cam_cpas110_irq_err[] = {
{
.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
.enable = true,
.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
.enable = true,
.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
.enable = true,
.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
.enable = true,
.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
.enable = true,
.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x1190,
/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
.value = 1,
},
},
{
/* AHB timeout is signalled through SBM flag-out, not an err logger */
.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
.enable = true,
.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
.value = 0x1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
},
.err_clear = {
.enable = false,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
.enable = false,
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
.enable = false,
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
.enable = TEST_IRQ_ENABLE ? true : false,
.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
.value = 0x5,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
},
.err_clear = {
.enable = false,
},
},
};
/*
 * Per-port CAMNOC register settings for CPAS v170 1.1.0: priority LUTs,
 * urgency, danger/safe LUTs and UBWC encode/decode control for each
 * port type. On this version the UBWC control registers are written
 * explicitly for the IFE and IPE/BPS ports; the FD port is disabled.
 */
static struct cam_camnoc_specific
cam_cpas110_camnoc_specific[] = {
{
.port_type = CAM_CAMNOC_CDM,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
.value = 0x22222222,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
.value = 0x22222222,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
.value = 0x2,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
.enable = false,
},
},
{
.port_type = CAM_CAMNOC_IFE02,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
.value = 0x66666543,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
.value = 0x66666666,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
.mask = 0x70,
/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
.shift = 0x4,
.value = 3,
},
.danger_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
.value = 0xFFFFFF00,
},
.safe_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
.value = 0x1,
},
.ubwc_ctl = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_IFE13,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
.value = 0x66666543,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
.value = 0x66666666,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
.mask = 0x70,
/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
.shift = 0x4,
.value = 3,
},
.danger_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
.value = 0xFFFFFF00,
},
.safe_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
.value = 0x1,
},
.ubwc_ctl = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
.value = 0x33333333,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
.value = 0x33333333,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
.mask = 0x7,
/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
.shift = 0x0,
.value = 3,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
.value = 0x33333333,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
.value = 0x33333333,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
.mask = 0x70,
/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
.shift = 0x4,
.value = 3,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_JPEG,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
.value = 0x22222222,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
.value = 0x22222222,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
.value = 0x22,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
.enable = false,
},
},
{
.port_type = CAM_CAMNOC_FD,
.enable = false,
},
{
.port_type = CAM_CAMNOC_ICP,
.enable = true,
.flag_out_set0_low = {
.enable = true,
.access_type = CAM_REG_TYPE_WRITE,
.masked_value = 0,
.offset = 0x2088,
.value = 0x100000,
},
},
};
/*
 * Register offsets of the CAMNOC error-logger (ERRLOGGER_*) block for
 * CPAS v170 1.1.0: main control, error-valid flag and the four
 * low/high error log register pairs.
 */
static struct cam_camnoc_err_logger_info cam170_cpas110_err_logger_offsets = {
.mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
.errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
.errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
.errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
.errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
.errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
.errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
.errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
.errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
.errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
};
/*
 * Errata workaround list for CPAS v170 1.1.0.
 * The CAMNOC "flush slave pending transactions" workaround is disabled
 * (.enable = false) for this HW version; the register info below
 * documents the sideband-manager sense-in bits (17-19 of
 * SidebandManager_SenseIn0_Low) that would be polled if it were enabled.
 */
static struct cam_cpas_hw_errata_wa_list cam170_cpas110_errata_wa_list = {
.camnoc_flush_slave_pending_trans = {
.enable = false,
.data.reg_info = {
.access_type = CAM_REG_TYPE_READ,
.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
.mask = 0xE0000, /* Bits 17, 18, 19 */
.value = 0, /* expected to be 0 */
},
},
};
/*
 * Top-level CAMNOC description for CPAS v170 1.1.0, tying together the
 * per-port settings table, SBM IRQ configuration, error IRQ table,
 * error-logger register offsets and errata workaround list.
 * Array sizes are derived with sizeof(arr)/sizeof(arr[0]) so they stay
 * in sync with the tables themselves.
 */
static struct cam_camnoc_info cam170_cpas110_camnoc_info = {
.specific = &cam_cpas110_camnoc_specific[0],
.specific_size = sizeof(cam_cpas110_camnoc_specific) /
sizeof(cam_cpas110_camnoc_specific[0]),
.irq_sbm = &cam_cpas110_irq_sbm,
.irq_err = &cam_cpas110_irq_err[0],
.irq_err_size = sizeof(cam_cpas110_irq_err) /
sizeof(cam_cpas110_irq_err[0]),
.err_logger = &cam170_cpas110_err_logger_offsets,
.errata_wa_list = &cam170_cpas110_errata_wa_list,
};
#endif /* _CPASTOP_V170_110_H_ */

View file

@ -0,0 +1,558 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CPASTOP_V175_100_H_
#define _CPASTOP_V175_100_H_
#define TEST_IRQ_ENABLE 0
/*
 * Sideband manager (SBM) IRQ configuration for CPAS v175 1.0.0:
 * which fault-in ports are enabled (ports 0-5, plus port 8 only when
 * TEST_IRQ_ENABLE is set), where fault status is read, and which
 * flag-out bits are written to clear.
 */
static struct cam_camnoc_irq_sbm cam_cpas_v175_100_irq_sbm = {
.sbm_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
(TEST_IRQ_ENABLE ?
0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
0x0),
},
.sbm_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
},
.sbm_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
/* Clear bit 2 as well when the test IRQ is enabled */
.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
}
};
/*
 * CAMNOC error IRQ table for CPAS v175 1.0.0. Each entry maps a HW IRQ
 * type to its SBM fault-in port mask and the enable/status/clear
 * register offsets used to service it. RESERVED entries are disabled;
 * the CAMNOC_TEST entry is only enabled when TEST_IRQ_ENABLE is set.
 */
static struct cam_camnoc_irq_err
cam_cpas_v175_100_irq_err[] = {
{
.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
.enable = true,
.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
.enable = true,
.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
.enable = true,
.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
.enable = true,
.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
.enable = true,
.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x1190,
/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
.value = 1,
},
},
{
/* AHB timeout is signalled through SBM flag-out, not an err logger */
.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
.enable = true,
.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
.value = 0x1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
},
.err_clear = {
.enable = false,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
.enable = false,
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
.enable = false,
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
.enable = TEST_IRQ_ENABLE ? true : false,
.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
.value = 0x5,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
},
.err_clear = {
.enable = false,
},
},
};
/*
 * Per-port CAMNOC register settings for CPAS v175 1.0.0: priority LUTs,
 * urgency, danger/safe LUTs and UBWC control for each port type.
 * Unlike v170, the UBWC control entries are present but disabled here:
 * power-on default values already provide the required settings (see
 * the per-entry comments). The FD port is disabled.
 */
static struct cam_camnoc_specific
cam_cpas_v175_100_camnoc_specific[] = {
{
.port_type = CAM_CAMNOC_CDM,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
.value = 0x22222222,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
.value = 0x22222222,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
.value = 0x2,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
.enable = false,
},
},
{
.port_type = CAM_CAMNOC_IFE02,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
.value = 0x66666543,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
.value = 0x66666666,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
.mask = 0x70,
/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
.shift = 0x4,
.value = 3,
},
.danger_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
.value = 0xFFFFFF00,
},
.safe_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
.value = 0x1,
},
.ubwc_ctl = {
/*
 * Do not explicitly set ubwc config register.
 * Power on default values are taking care of required
 * register settings.
 */
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_IFE13,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
.value = 0x66666543,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
.value = 0x66666666,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
.mask = 0x70,
/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
.shift = 0x4,
.value = 3,
},
.danger_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
.value = 0xFFFFFF00,
},
.safe_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
.value = 0x1,
},
.ubwc_ctl = {
/*
 * Do not explicitly set ubwc config register.
 * Power on default values are taking care of required
 * register settings.
 */
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
.value = 0x33333333,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
.value = 0x33333333,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
.mask = 0x7,
/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
.shift = 0x0,
.value = 3,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
/*
 * Do not explicitly set ubwc config register.
 * Power on default values are taking care of required
 * register settings.
 */
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
.value = 0x33333333,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
.value = 0x33333333,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
.mask = 0x70,
/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
.shift = 0x4,
.value = 3,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
/*
 * Do not explicitly set ubwc config register.
 * Power on default values are taking care of required
 * register settings.
 */
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_JPEG,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
.value = 0x22222222,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
.value = 0x22222222,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
.value = 0x22,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
.enable = false,
},
},
{
.port_type = CAM_CAMNOC_FD,
.enable = false,
},
{
.port_type = CAM_CAMNOC_ICP,
.enable = true,
.flag_out_set0_low = {
.enable = true,
.access_type = CAM_REG_TYPE_WRITE,
.masked_value = 0,
.offset = 0x2088,
.value = 0x100000,
},
},
};
/*
 * Register offsets of the CAMNOC error-logger (ERRLOGGER_*) block for
 * CPAS v175 1.0.0: main control, error-valid flag and the four
 * low/high error log register pairs.
 */
static struct cam_camnoc_err_logger_info cam175_cpas100_err_logger_offsets = {
.mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
.errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
.errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
.errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
.errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
.errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
.errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
.errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
.errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
.errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
};
/*
 * Errata workaround list for CPAS v175 1.0.0.
 * The CAMNOC "flush slave pending transactions" workaround is disabled
 * (.enable = false) for this HW version; the register info below
 * documents the sideband-manager sense-in bits (17-19 of
 * SidebandManager_SenseIn0_Low) that would be polled if it were enabled.
 */
static struct cam_cpas_hw_errata_wa_list cam175_cpas100_errata_wa_list = {
.camnoc_flush_slave_pending_trans = {
.enable = false,
.data.reg_info = {
.access_type = CAM_REG_TYPE_READ,
.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
.mask = 0xE0000, /* Bits 17, 18, 19 */
.value = 0, /* expected to be 0 */
},
},
};
/*
 * Top-level CAMNOC description for CPAS v175 1.0.0, tying together the
 * per-port settings table, SBM IRQ configuration, error IRQ table,
 * error-logger register offsets and errata workaround list.
 * Array sizes are derived with sizeof(arr)/sizeof(arr[0]) so they stay
 * in sync with the tables themselves.
 */
static struct cam_camnoc_info cam175_cpas100_camnoc_info = {
.specific = &cam_cpas_v175_100_camnoc_specific[0],
.specific_size = sizeof(cam_cpas_v175_100_camnoc_specific) /
sizeof(cam_cpas_v175_100_camnoc_specific[0]),
.irq_sbm = &cam_cpas_v175_100_irq_sbm,
.irq_err = &cam_cpas_v175_100_irq_err[0],
.irq_err_size = sizeof(cam_cpas_v175_100_irq_err) /
sizeof(cam_cpas_v175_100_irq_err[0]),
.err_logger = &cam175_cpas100_err_logger_offsets,
.errata_wa_list = &cam175_cpas100_errata_wa_list,
};
#endif /* _CPASTOP_V175_100_H_ */

View file

@ -0,0 +1,558 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CPASTOP_V175_101_H_
#define _CPASTOP_V175_101_H_
#define TEST_IRQ_ENABLE 0
/*
 * Sideband manager (SBM) IRQ configuration for CPAS v175 1.0.1:
 * which fault-in ports are enabled (ports 0-5, plus port 8 only when
 * TEST_IRQ_ENABLE is set), where fault status is read, and which
 * flag-out bits are written to clear.
 */
static struct cam_camnoc_irq_sbm cam_cpas_v175_101_irq_sbm = {
.sbm_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
(TEST_IRQ_ENABLE ?
0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
0x0),
},
.sbm_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
},
.sbm_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
/* Clear bit 2 as well when the test IRQ is enabled */
.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
}
};
/*
 * CAMNOC error IRQ table for CPAS v175 1.0.1. Each entry maps a HW IRQ
 * type to its SBM fault-in port mask and the enable/status/clear
 * register offsets used to service it. RESERVED entries are disabled;
 * the CAMNOC_TEST entry is only enabled when TEST_IRQ_ENABLE is set.
 */
static struct cam_camnoc_irq_err
cam_cpas_v175_101_irq_err[] = {
{
.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
.enable = true,
.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
.enable = true,
.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
.enable = true,
.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
.enable = true,
.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
.value = 1,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
.enable = true,
.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
.value = 1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x1190,
/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
},
.err_clear = {
.access_type = CAM_REG_TYPE_WRITE,
.enable = true,
.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
.value = 1,
},
},
{
/* AHB timeout is signalled through SBM flag-out, not an err logger */
.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
.enable = true,
.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
.value = 0x1,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
},
.err_clear = {
.enable = false,
},
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
.enable = false,
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
.enable = false,
},
{
.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
.enable = TEST_IRQ_ENABLE ? true : false,
.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
.err_enable = {
.access_type = CAM_REG_TYPE_READ_WRITE,
.enable = true,
.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
.value = 0x5,
},
.err_status = {
.access_type = CAM_REG_TYPE_READ,
.enable = true,
.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
},
.err_clear = {
.enable = false,
},
},
};
static struct cam_camnoc_specific
cam_cpas_v175_101_camnoc_specific[] = {
{
.port_type = CAM_CAMNOC_CDM,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
.value = 0x22222222,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
.value = 0x22222222,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
.value = 0x2,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
.enable = false,
},
},
{
.port_type = CAM_CAMNOC_IFE02,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
.value = 0x66666543,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
.value = 0x66666666,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
.mask = 0x70,
/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
.shift = 0x4,
.value = 3,
},
.danger_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
.value = 0xFFFFFF00,
},
.safe_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
.value = 0x1,
},
.ubwc_ctl = {
/*
* Do not explicitly set ubwc config register.
* Power on default values are taking care of required
* register settings.
*/
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_IFE13,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
.value = 0x66666543,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
.value = 0x66666666,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
.mask = 0x70,
/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
.shift = 0x4,
.value = 3,
},
.danger_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
.value = 0xFFFFFF00,
},
.safe_lut = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
.value = 0x1,
},
.ubwc_ctl = {
/*
* Do not explicitly set ubwc config register.
* Power on default values are taking care of required
* register settings.
*/
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
.value = 0x33333333,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
.value = 0x33333333,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
.mask = 0x7,
/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
.shift = 0x0,
.value = 3,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
/*
* Do not explicitly set ubwc config register.
* Power on default values are taking care of required
* register settings.
*/
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
.value = 0x33333333,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
.value = 0x33333333,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 1,
.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
.mask = 0x70,
/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
.shift = 0x4,
.value = 3,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
/*
* Do not explicitly set ubwc config register.
* Power on default values are taking care of required
* register settings.
*/
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
.value = 1,
},
},
{
.port_type = CAM_CAMNOC_JPEG,
.enable = true,
.priority_lut_low = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
.value = 0x22222222,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
.value = 0x22222222,
},
.urgency = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
.value = 0x22,
},
.danger_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
.value = 0x0,
},
.safe_lut = {
.enable = false,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
.value = 0x0,
},
.ubwc_ctl = {
.enable = false,
},
},
{
.port_type = CAM_CAMNOC_FD,
.enable = false,
},
{
.port_type = CAM_CAMNOC_ICP,
.enable = true,
.flag_out_set0_low = {
.enable = true,
.access_type = CAM_REG_TYPE_WRITE,
.masked_value = 0,
.offset = 0x2088,
.value = 0x100000,
},
},
};
/*
 * CAMNOC error-logger register offsets for CPAS v175 HW 101. These are
 * read when a slave-error IRQ fires to recover the faulting transaction
 * details (see struct cam_camnoc_irq_slave_err_data for field layouts).
 */
static struct cam_camnoc_err_logger_info cam175_cpas101_err_logger_offsets = {
.mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
.errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
.errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
.errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
.errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
.errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
.errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
.errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
.errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
.errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
};
/*
 * HW errata workarounds for CPAS v175 HW 101. The flush-slave-pending
 * workaround is disabled here (.enable = false); the reg_info describes
 * the sideband status bits that would be polled if it were enabled.
 */
static struct cam_cpas_hw_errata_wa_list cam175_cpas101_errata_wa_list = {
.camnoc_flush_slave_pending_trans = {
.enable = false,
.data.reg_info = {
.access_type = CAM_REG_TYPE_READ,
.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
.mask = 0xE0000, /* Bits 17, 18, 19 */
.value = 0, /* expected to be 0 */
},
},
};
/*
 * Top-level CAMNOC descriptor for CPAS v175 HW 101, tying together the
 * per-port QoS table, SBM IRQ configuration, error-logger offsets and
 * errata workarounds defined above. Array sizes are derived with the
 * classic sizeof(arr)/sizeof(arr[0]) idiom.
 */
static struct cam_camnoc_info cam175_cpas101_camnoc_info = {
.specific = &cam_cpas_v175_101_camnoc_specific[0],
.specific_size = sizeof(cam_cpas_v175_101_camnoc_specific) /
sizeof(cam_cpas_v175_101_camnoc_specific[0]),
.irq_sbm = &cam_cpas_v175_101_irq_sbm,
.irq_err = &cam_cpas_v175_101_irq_err[0],
.irq_err_size = sizeof(cam_cpas_v175_101_irq_err) /
sizeof(cam_cpas_v175_101_irq_err[0]),
.err_logger = &cam175_cpas101_err_logger_offsets,
.errata_wa_list = &cam175_cpas101_errata_wa_list,
};
#endif /* _CPASTOP_V175_101_H_ */

View file

@ -0,0 +1,511 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_CPAS_API_H_
#define _CAM_CPAS_API_H_
#include <linux/device.h>
#include <linux/platform_device.h>
#include <media/cam_cpas.h>
#include "cam_soc_util.h"
/* Max length (bytes) of a client identifier string incl. terminator */
#define CAM_HW_IDENTIFIER_LENGTH 128
/* Default AXI Bandwidth vote */
#define CAM_CPAS_DEFAULT_AXI_BW 1024
/**
 * enum cam_cpas_reg_base - Enum for register base identifier. These
 *                          are the identifiers used in generic register
 *                          write/read APIs provided by cpas driver.
 *
 * @CAM_CPAS_REG_CPASTOP : CPAS top register space
 * @CAM_CPAS_REG_CAMNOC  : CAMNOC register space
 * @CAM_CPAS_REG_CAMSS   : CAMSS register space
 * @CAM_CPAS_REG_MAX     : Number of register bases (not a valid base)
 */
enum cam_cpas_reg_base {
CAM_CPAS_REG_CPASTOP,
CAM_CPAS_REG_CAMNOC,
CAM_CPAS_REG_CAMSS,
CAM_CPAS_REG_MAX
};
/**
 * enum cam_cpas_hw_version - Enum for Titan CPAS HW Versions
 *
 * Values encode family/major/minor, e.g. 0x175101 is Titan 175 v1.0.1.
 */
enum cam_cpas_hw_version {
CAM_CPAS_TITAN_NONE = 0,
CAM_CPAS_TITAN_150_V100 = 0x150100,
CAM_CPAS_TITAN_170_V100 = 0x170100,
CAM_CPAS_TITAN_170_V110 = 0x170110,
CAM_CPAS_TITAN_170_V120 = 0x170120,
CAM_CPAS_TITAN_175_V100 = 0x175100,
CAM_CPAS_TITAN_175_V101 = 0x175101,
CAM_CPAS_TITAN_MAX
};
/**
 * enum cam_camnoc_irq_type - Enum for camnoc irq types
 *
 * @CAM_CAMNOC_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
 *                              1 QHB port) has an error logger. The error
 *                              observed at any slave port is logged into
 *                              the error logger register and an IRQ is
 *                              triggered
 * @CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR : Triggered if any error detected
 *                                           in the IFE0 UBWC encoder instance
 * @CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR : Triggered if any error detected
 *                                           in the IFE1 or IFE3 UBWC encoder
 *                                           instance
 * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error detected
 *                                            in the IPE/BPS UBWC decoder
 *                                            instance
 * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error detected
 *                                            in the IPE/BPS UBWC encoder
 *                                            instance
 * @CAM_CAMNOC_IRQ_AHB_TIMEOUT : Triggered when the QHS_ICP slave
 *                               times out after 4000 AHB cycles
 */
enum cam_camnoc_irq_type {
CAM_CAMNOC_IRQ_SLAVE_ERROR,
CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
CAM_CAMNOC_IRQ_AHB_TIMEOUT,
};
/**
 * struct cam_camnoc_irq_slave_err_data : Data for Slave error.
 *
 * Each member is a union overlaying a named bitfield view on the raw
 * 32-bit register value read from the corresponding error-logger register
 * (offsets in struct cam_camnoc_err_logger_info).
 *
 * @mainctrl     : Err logger mainctrl info
 * @errvld       : Err logger errvld info
 * @errlog0_low  : Err logger errlog0_low info
 * @errlog0_high : Err logger errlog0_high info
 * @errlog1_low  : Err logger errlog1_low info
 * @errlog1_high : Err logger errlog1_high info
 * @errlog2_low  : Err logger errlog2_low info
 * @errlog2_high : Err logger errlog2_high info
 * @errlog3_low  : Err logger errlog3_low info
 * @errlog3_high : Err logger errlog3_high info
 *
 */
struct cam_camnoc_irq_slave_err_data {
union {
struct {
uint32_t stall_en : 1; /* bit 0 */
uint32_t fault_en : 1; /* bit 1 */
uint32_t rsv : 30; /* bits 2-31 */
};
uint32_t value;
} mainctrl;
union {
struct {
uint32_t err_vld : 1; /* bit 0 */
uint32_t rsv : 31; /* bits 1-31 */
};
uint32_t value;
} errvld;
union {
struct {
uint32_t loginfo_vld : 1; /* bit 0 */
uint32_t word_error : 1; /* bit 1 */
uint32_t non_secure : 1; /* bit 2 */
uint32_t device : 1; /* bit 3 */
uint32_t opc : 3; /* bits 4 - 6 */
uint32_t rsv0 : 1; /* bit 7 */
uint32_t err_code : 3; /* bits 8 - 10 */
uint32_t sizef : 3; /* bits 11 - 13 */
uint32_t rsv1 : 2; /* bits 14 - 15 */
uint32_t addr_space : 6; /* bits 16 - 21 */
uint32_t rsv2 : 10; /* bits 22 - 31 */
};
uint32_t value;
} errlog0_low;
union {
struct {
uint32_t len1 : 10; /* bits 0 - 9 */
uint32_t rsv : 22; /* bits 10 - 31 */
};
uint32_t value;
} errlog0_high;
union {
struct {
uint32_t path : 16; /* bits 0 - 15 */
uint32_t rsv : 16; /* bits 16 - 31 */
};
uint32_t value;
} errlog1_low;
union {
struct {
uint32_t extid : 18; /* bits 0 - 17 */
uint32_t rsv : 14; /* bits 18 - 31 */
};
uint32_t value;
} errlog1_high;
union {
struct {
uint32_t errlog2_lsb : 32; /* bits 0 - 31 */
};
uint32_t value;
} errlog2_low;
union {
struct {
uint32_t errlog2_msb : 16; /* bits 0 - 15 */
uint32_t rsv : 16; /* bits 16 - 31 */
};
uint32_t value;
} errlog2_high;
union {
struct {
uint32_t errlog3_lsb : 32; /* bits 0 - 31 */
};
uint32_t value;
} errlog3_low;
union {
struct {
uint32_t errlog3_msb : 32; /* bits 0 - 31 */
};
uint32_t value;
} errlog3_high;
};
/**
 * struct cam_camnoc_irq_ubwc_enc_data : Data for UBWC Encode error.
 *
 * @encerr_status : Encode error status; raw register value with a 3-bit
 *                  status field overlay (bits 0-2).
 *
 */
struct cam_camnoc_irq_ubwc_enc_data {
union {
struct {
uint32_t encerrstatus : 3; /* bits 0 - 2 */
uint32_t rsv : 29; /* bits 3 - 31 */
};
uint32_t value;
} encerr_status;
};
/**
 * struct cam_camnoc_irq_ubwc_dec_data : Data for UBWC Decode error.
 *
 * @decerr_status : Decoder error status
 * @thr_err       : Set to 1 if
 *                  At least one of the bflc_len fields in the bit steam exceeds
 *                  its threshold value. This error is possible only for
 *                  RGBA1010102, TP10, and RGB565 formats
 * @fcl_err       : Set to 1 if
 *                  Fast clear with a legal non-RGB format
 * @len_md_err    : Set to 1 if
 *                  The calculated burst length does not match burst length
 *                  specified by the metadata value
 * @format_err    : Set to 1 if
 *                  Illegal format
 *                  1. bad format :2,3,6
 *                  2. For 32B MAL, metadata=6
 *                  3. For 32B MAL RGB565, Metadata != 0,1,7
 *                  4. For 64B MAL RGB565, metadata[3:1] == 1,2
 *
 */
struct cam_camnoc_irq_ubwc_dec_data {
union {
struct {
uint32_t thr_err : 1; /* bit 0 */
uint32_t fcl_err : 1; /* bit 1 */
uint32_t len_md_err : 1; /* bit 2 */
uint32_t format_err : 1; /* bit 3 */
uint32_t rsv : 28; /* bits 4 - 31 */
};
uint32_t value;
} decerr_status;
};
/**
 * struct cam_camnoc_irq_ahb_timeout_data : Data for AHB timeout IRQ.
 *
 * @data : Raw payload associated with the AHB timeout event
 */
struct cam_camnoc_irq_ahb_timeout_data {
uint32_t data;
};
/**
 * struct cam_cpas_irq_data : CAMNOC IRQ data
 *
 * @irq_type  : To identify the type of IRQ; selects which union member
 *              of @u is valid.
 * @u         : Union of irq err data information
 * @slave_err : Data for Slave error.
 *              Valid if type is CAM_CAMNOC_IRQ_SLAVE_ERROR
 * @enc_err   : Data for UBWC Encode error.
 *              Valid if type is one of below:
 *              CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR
 *              CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR
 *              CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR
 * @dec_err   : Data for UBWC Decode error.
 *              Valid if type is CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR
 * @ahb_err   : Data for AHB timeout.
 *              Valid if type is CAM_CAMNOC_IRQ_AHB_TIMEOUT
 *
 */
struct cam_cpas_irq_data {
enum cam_camnoc_irq_type irq_type;
union {
struct cam_camnoc_irq_slave_err_data slave_err;
struct cam_camnoc_irq_ubwc_enc_data enc_err;
struct cam_camnoc_irq_ubwc_dec_data dec_err;
struct cam_camnoc_irq_ahb_timeout_data ahb_err;
} u;
};
/**
 * struct cam_cpas_register_params : Register params for cpas client
 *
 * @identifier         : Input identifier string which is the device label
 *                       from dt like vfe, ife, jpeg etc
 * @cell_index         : Input integer identifier pointing to the cell index
 *                       from dt of the device. This can be used to form a
 *                       unique string with @identifier like vfe0, ife1,
 *                       jpeg0, etc
 * @dev                : device handle
 * @userdata           : Input private data which will be passed back as an
 *                       argument in the callback
 * @cam_cpas_client_cb : Input callback pointer for triggering the
 *                       callbacks from CPAS driver. Invoked with the
 *                       client handle, @userdata and the IRQ data.
 * @client_handle      : Output Unique handle generated for this register
 *
 */
struct cam_cpas_register_params {
char identifier[CAM_HW_IDENTIFIER_LENGTH];
uint32_t cell_index;
struct device *dev;
void *userdata;
bool (*cam_cpas_client_cb)(
uint32_t client_handle,
void *userdata,
struct cam_cpas_irq_data *irq_data);
uint32_t client_handle;
};
/**
 * enum cam_vote_type - Enum for voting type
 *
 * @CAM_VOTE_ABSOLUTE : Absolute vote (explicit level)
 * @CAM_VOTE_DYNAMIC  : Dynamic vote (derived from a frequency)
 */
enum cam_vote_type {
CAM_VOTE_ABSOLUTE,
CAM_VOTE_DYNAMIC,
};
/**
 * struct cam_ahb_vote : AHB vote
 *
 * @type  : AHB voting type.
 *          CAM_VOTE_ABSOLUTE : vote based on the value 'level' is set
 *          CAM_VOTE_DYNAMIC  : vote calculated dynamically using 'freq'
 *                              and 'dev' handle is set
 * @vote  : Union; which member is valid depends on @type.
 * @level : AHB vote level
 * @freq  : AHB vote dynamic frequency
 *
 */
struct cam_ahb_vote {
enum cam_vote_type type;
union {
enum cam_vote_level level;
unsigned long freq;
} vote;
};
/**
 * struct cam_axi_vote : AXI vote
 *
 * @uncompressed_bw : Bus bandwidth required in Bytes for uncompressed data
 *                    This is the required bandwidth for uncompressed
 *                    data traffic between hw core and camnoc.
 * @compressed_bw   : Bus bandwidth required in Bytes for compressed data.
 *                    This is the required bandwidth for compressed
 *                    data traffic between camnoc and mmnoc.
 *
 * If one of the above is not applicable to a hw client, it has to
 * fill the same values in both.
 *
 */
struct cam_axi_vote {
uint64_t uncompressed_bw;
uint64_t compressed_bw;
};
/**
* cam_cpas_register_client()
*
* @brief: API to register cpas client
*
* @register_params: Input params to register as a client to CPAS
*
* @return 0 on success.
*
*/
int cam_cpas_register_client(
struct cam_cpas_register_params *register_params);
/**
* cam_cpas_unregister_client()
*
* @brief: API to unregister cpas client
*
* @client_handle: Client handle to be unregistered
*
* @return 0 on success.
*
*/
int cam_cpas_unregister_client(uint32_t client_handle);
/**
* cam_cpas_start()
*
* @brief: API to start cpas client hw. Clients have to vote for minimal
* bandwidth requirements for AHB, AXI. Use cam_cpas_update_ahb_vote
* to scale bandwidth after start.
*
* @client_handle: client cpas handle
* @ahb_vote : Pointer to ahb vote info
* @axi_vote : Pointer to axi bandwidth vote info
*
* If AXI vote is not applicable to a particular client, use the value exposed
* by CAM_CPAS_DEFAULT_AXI_BW as the default vote request.
*
* @return 0 on success.
*
*/
int cam_cpas_start(
uint32_t client_handle,
struct cam_ahb_vote *ahb_vote,
struct cam_axi_vote *axi_vote);
/**
* cam_cpas_stop()
*
* @brief: API to stop cpas client hw. Bandwidth for AHB, AXI votes
* would be removed for this client on this call. Clients should not
* use cam_cpas_update_ahb_vote or cam_cpas_update_axi_vote
* to remove their bandwidth vote.
*
* @client_handle: client cpas handle
*
* @return 0 on success.
*
*/
int cam_cpas_stop(uint32_t client_handle);
/**
* cam_cpas_update_ahb_vote()
*
* @brief: API to update AHB vote requirement. Use this function only
* between cam_cpas_start and cam_cpas_stop in case clients wants
* to scale to different vote level. Do not use this function to de-vote,
* removing client's vote is implicit on cam_cpas_stop
*
* @client_handle : Client cpas handle
* @ahb_vote : Pointer to ahb vote info
*
* @return 0 on success.
*
*/
int cam_cpas_update_ahb_vote(
uint32_t client_handle,
struct cam_ahb_vote *ahb_vote);
/**
* cam_cpas_update_axi_vote()
*
* @brief: API to update AXI vote requirement. Use this function only
* between cam_cpas_start and cam_cpas_stop in case clients wants
* to scale to different vote level. Do not use this function to de-vote,
* removing client's vote is implicit on cam_cpas_stop
*
* @client_handle : Client cpas handle
* @axi_vote : Pointer to axi bandwidth vote info
*
* @return 0 on success.
*
*/
int cam_cpas_update_axi_vote(
uint32_t client_handle,
struct cam_axi_vote *axi_vote);
/**
* cam_cpas_reg_write()
*
* @brief: API to write a register value in CPAS register space
*
* @client_handle : Client cpas handle
* @reg_base : Register base identifier
* @offset : Offset from the register base address
* @mb : Whether to do reg write with memory barrier
* @value : Value to be written in register
*
* @return 0 on success.
*
*/
int cam_cpas_reg_write(
uint32_t client_handle,
enum cam_cpas_reg_base reg_base,
uint32_t offset,
bool mb,
uint32_t value);
/**
* cam_cpas_reg_read()
*
* @brief: API to read a register value from CPAS register space
*
* @client_handle : Client cpas handle
* @reg_base : Register base identifier
* @offset : Offset from the register base address
* @mb : Whether to do reg read with memory barrier
* @value : Value to be read from register
*
* @return 0 on success.
*
*/
int cam_cpas_reg_read(
uint32_t client_handle,
enum cam_cpas_reg_base reg_base,
uint32_t offset,
bool mb,
uint32_t *value);
/**
* cam_cpas_get_hw_info()
*
* @brief: API to get camera hw information
*
* @camera_family : Camera family type. One of
* CAM_FAMILY_CAMERA_SS
* CAM_FAMILY_CPAS_SS
* @camera_version : Camera platform version
* @cpas_version : Camera cpas version
* @cam_caps : Camera capability
*
* @return 0 on success.
*
*/
int cam_cpas_get_hw_info(
uint32_t *camera_family,
struct cam_hw_version *camera_version,
struct cam_hw_version *cpas_version,
uint32_t *cam_caps);
/**
* cam_cpas_get_cpas_hw_version()
*
* @brief: API to get camera cpas hw version
*
* @hw_version : Camera cpas hw version
*
* @return 0 on success.
*
*/
int cam_cpas_get_cpas_hw_version(
uint32_t *hw_version);
#endif /* _CAM_CPAS_API_H_ */

View file

@ -0,0 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
# Kbuild for the FD (face detection) device layer: builds the FD v4l2
# subdev and context objects, and descends into the FD HW manager.
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd
ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw
ccflags-y += -Idrivers/media/platform/msm/camera
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += fd_hw_mgr/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_dev.o cam_fd_context.o

View file

@ -0,0 +1,249 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include "cam_debug_util.h"
#include "cam_fd_context.h"
#include "cam_trace.h"
/* Device name used when registering an FD base context */
static const char fd_dev_name[] = "fd";

/* Functions in Available state */

/*
 * ACQUIRE_DEV handler for the Available state: acquire HW resources via
 * the generic context helper and transition to the Acquired state.
 */
static int __cam_fd_ctx_acquire_dev_in_available(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	int rc = cam_context_acquire_dev_to_hw(ctx, cmd);

	if (rc) {
		CAM_ERR(CAM_FD, "Failed in Acquire dev, rc=%d", rc);
	} else {
		ctx->state = CAM_CTX_ACQUIRED;
		trace_cam_context_state("FD", ctx);
	}

	return rc;
}
/* Functions in Acquired state */

/*
 * RELEASE_DEV handler for the Acquired state: release HW resources and
 * return the context to the Available state.
 */
static int __cam_fd_ctx_release_dev_in_acquired(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = cam_context_release_dev_to_hw(ctx, cmd);

	if (rc) {
		CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc);
		return rc;
	}

	ctx->state = CAM_CTX_AVAILABLE;
	trace_cam_context_state("FD", ctx);

	return 0;
}
/*
 * CONFIG_DEV handler for the Acquired state: hand the packet to the HW
 * manager via the generic prepare helper. No state transition.
 */
static int __cam_fd_ctx_config_dev_in_acquired(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = cam_context_prepare_dev_to_hw(ctx, cmd);

	if (rc)
		CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc);

	return rc;
}
/*
 * START_DEV handler for the Acquired state: start the HW and move the
 * context to the Activated state.
 */
static int __cam_fd_ctx_start_dev_in_acquired(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = cam_context_start_dev_to_hw(ctx, cmd);

	if (rc) {
		CAM_ERR(CAM_FD, "Failed in Start dev, rc=%d", rc);
		return rc;
	}

	ctx->state = CAM_CTX_ACTIVATED;
	trace_cam_context_state("FD", ctx);

	return 0;
}
/* Functions in Activated state */

/*
 * STOP_DEV handler for the Activated state: stop the HW and fall back to
 * the Acquired state. @cmd is unused by the generic stop helper.
 */
static int __cam_fd_ctx_stop_dev_in_activated(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = cam_context_stop_dev_to_hw(ctx);

	if (rc) {
		CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc);
		return rc;
	}

	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("FD", ctx);

	return 0;
}
/*
 * RELEASE_DEV handler for the Activated state: first stop the HW (moving
 * to Acquired), then reuse the Acquired-state release handler.
 */
static int __cam_fd_ctx_release_dev_in_activated(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = __cam_fd_ctx_stop_dev_in_activated(ctx, NULL);

	if (rc) {
		CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc);
		return rc;
	}

	rc = __cam_fd_ctx_release_dev_in_acquired(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc);

	return rc;
}
static int __cam_fd_ctx_flush_dev_in_activated(struct cam_context *ctx,
struct cam_flush_dev_cmd *cmd)
{
int rc;
rc = cam_context_flush_dev_to_hw(ctx, cmd);
if (rc)
CAM_ERR(CAM_ICP, "Failed to flush device, rc=%d", rc);
return rc;
}
/*
 * CONFIG_DEV handler for the Activated state: identical contract to the
 * Acquired-state variant — prepare the packet for the HW manager.
 */
static int __cam_fd_ctx_config_dev_in_activated(
	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
{
	int rc = cam_context_prepare_dev_to_hw(ctx, cmd);

	if (rc)
		CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc);

	return rc;
}
/*
 * IRQ handler for the Activated state: forward buf-done events from the
 * HW layer into the generic context buf-done path.
 */
static int __cam_fd_ctx_handle_irq_in_activated(void *context,
	uint32_t evt_id, void *evt_data)
{
	int rc = cam_context_buf_done_from_hw(context, evt_data, evt_id);

	if (rc)
		CAM_ERR(CAM_FD, "Failed in buf done, rc=%d", rc);

	return rc;
}
/* top state machine */
/*
 * Per-state dispatch table indexed by the context state
 * (CAM_CTX_STATE_MAX entries). Each entry provides the ioctl handlers,
 * CRM ops and IRQ handler valid in that state; empty entries mean the
 * operation is rejected by the generic context layer in that state.
 */
static struct cam_ctx_ops
cam_fd_ctx_state_machine[CAM_CTX_STATE_MAX] = {
/* Uninit */
{
.ioctl_ops = {},
.crm_ops = {},
.irq_ops = NULL,
},
/* Available */
{
.ioctl_ops = {
.acquire_dev = __cam_fd_ctx_acquire_dev_in_available,
},
.crm_ops = {},
.irq_ops = NULL,
},
/* Acquired */
{
.ioctl_ops = {
.release_dev = __cam_fd_ctx_release_dev_in_acquired,
.config_dev = __cam_fd_ctx_config_dev_in_acquired,
.start_dev = __cam_fd_ctx_start_dev_in_acquired,
},
.crm_ops = {},
.irq_ops = NULL,
},
/* Ready */
{
.ioctl_ops = { },
.crm_ops = {},
.irq_ops = NULL,
},
/* Activated */
{
.ioctl_ops = {
.stop_dev = __cam_fd_ctx_stop_dev_in_activated,
.release_dev = __cam_fd_ctx_release_dev_in_activated,
.config_dev = __cam_fd_ctx_config_dev_in_activated,
.flush_dev = __cam_fd_ctx_flush_dev_in_activated,
},
.crm_ops = {},
.irq_ops = __cam_fd_ctx_handle_irq_in_activated,
},
};
/*
 * cam_fd_context_init() - Initialize one FD context.
 *
 * Zeroes @fd_ctx, initializes the underlying base context with the FD
 * state machine and request pool, and cross-links the two objects.
 * Returns 0 on success or a negative errno.
 */
int cam_fd_context_init(struct cam_fd_context *fd_ctx,
	struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
	uint32_t ctx_id)
{
	int rc;

	if (!fd_ctx || !base_ctx) {
		CAM_ERR(CAM_FD, "Invalid Context %pK %pK", base_ctx, fd_ctx);
		return -EINVAL;
	}

	memset(fd_ctx, 0, sizeof(*fd_ctx));

	rc = cam_context_init(base_ctx, fd_dev_name, CAM_FD, ctx_id,
		NULL, hw_intf, fd_ctx->req_base, CAM_CTX_REQ_MAX);
	if (rc) {
		CAM_ERR(CAM_FD, "Camera Context Base init failed, rc=%d", rc);
		return rc;
	}

	/* Wire the FD context and its base context together */
	fd_ctx->base = base_ctx;
	base_ctx->ctx_priv = fd_ctx;
	base_ctx->state_machine = cam_fd_ctx_state_machine;

	return 0;
}
/*
 * cam_fd_context_deinit() - Tear down an FD context.
 *
 * Deinitializes the base context and clears @fd_ctx. Returns the base
 * deinit result (a failure is logged but the context is still cleared).
 */
int cam_fd_context_deinit(struct cam_fd_context *fd_ctx)
{
	int rc;

	if (!fd_ctx || !fd_ctx->base) {
		CAM_ERR(CAM_FD, "Invalid inputs %pK", fd_ctx);
		return -EINVAL;
	}

	rc = cam_context_deinit(fd_ctx->base);
	if (rc)
		CAM_ERR(CAM_FD, "Error in base deinit, rc=%d", rc);

	memset(fd_ctx, 0, sizeof(*fd_ctx));

	return rc;
}

View file

@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_FD_CONTEXT_H_
#define _CAM_FD_CONTEXT_H_
#include "cam_context.h"
#include "cam_context_utils.h"
#include "cam_hw_mgr_intf.h"
#include "cam_req_mgr_interface.h"
/**
 * struct cam_fd_context - Face Detection context information
 *
 * @base     : Base context pointer for this FD context
 * @req_base : List of base requests for this FD context
 */
struct cam_fd_context {
struct cam_context *base;
struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
};

/**
 * cam_fd_context_init() - Initialize an FD context on top of a base
 * context, installing the FD state machine and request pool.
 *
 * @fd_ctx   : FD context to initialize (zeroed by this call)
 * @base_ctx : Backing base context
 * @hw_intf  : FD HW manager interface
 * @ctx_id   : Index of this context
 *
 * Return: 0 on success, negative errno otherwise.
 */
int cam_fd_context_init(struct cam_fd_context *fd_ctx,
struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
uint32_t ctx_id);

/**
 * cam_fd_context_deinit() - Tear down an FD context and clear it.
 *
 * @ctx : FD context previously initialized with cam_fd_context_init()
 *
 * Return: 0 on success, negative errno otherwise.
 */
int cam_fd_context_deinit(struct cam_fd_context *ctx);
#endif /* _CAM_FD_CONTEXT_H_ */

View file

@ -0,0 +1,207 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include "cam_subdev.h"
#include "cam_node.h"
#include "cam_fd_context.h"
#include "cam_fd_hw_mgr.h"
#include "cam_fd_hw_mgr_intf.h"
#define CAM_FD_DEV_NAME "cam-fd"
/**
 * struct cam_fd_dev - FD device information
 *
 * @sd:         Subdev information
 * @base_ctx:   List of base contexts
 * @fd_ctx:     List of FD contexts
 * @lock:       Mutex handle; protects @open_cnt
 * @open_cnt:   FD subdev open count
 * @probe_done: Whether FD probe is completed
 */
struct cam_fd_dev {
struct cam_subdev sd;
struct cam_context base_ctx[CAM_CTX_MAX];
struct cam_fd_context fd_ctx[CAM_CTX_MAX];
struct mutex lock;
uint32_t open_cnt;
bool probe_done;
};

/* Single global FD device instance (one FD subdev per SoC) */
static struct cam_fd_dev g_fd_dev;
static int cam_fd_dev_open(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
struct cam_fd_dev *fd_dev = &g_fd_dev;
if (!fd_dev->probe_done) {
CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev);
return -ENODEV;
}
mutex_lock(&fd_dev->lock);
fd_dev->open_cnt++;
CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt);
mutex_unlock(&fd_dev->lock);
return 0;
}
/*
 * v4l2 subdev close: drop the open count and shut down the cam_node.
 *
 * NOTE(review): open_cnt is decremented without an underflow guard, and
 * cam_node_shutdown() is invoked on every close rather than only when
 * the last handle closes — confirm this matches the intended lifecycle.
 */
static int cam_fd_dev_close(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
struct cam_fd_dev *fd_dev = &g_fd_dev;
struct cam_node *node = v4l2_get_subdevdata(sd);
if (!fd_dev->probe_done) {
CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev);
return -ENODEV;
}
mutex_lock(&fd_dev->lock);
fd_dev->open_cnt--;
CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt);
mutex_unlock(&fd_dev->lock);
if (!node) {
CAM_ERR(CAM_FD, "Node ptr is NULL");
return -EINVAL;
}
/* Release all contexts/resources held via this node */
cam_node_shutdown(node);
return 0;
}
/* v4l2 subdev internal ops: track userspace open/close of the FD node */
static const struct v4l2_subdev_internal_ops cam_fd_subdev_internal_ops = {
.open = cam_fd_dev_open,
.close = cam_fd_dev_close,
};
/*
 * Platform probe: create the FD v4l2 subdev/cam_node, initialize the FD
 * HW manager and all FD contexts, then initialize the node. Cleanup on
 * failure unwinds in reverse order via goto labels.
 *
 * NOTE(review): hw_mgr_intf is a stack local filled by
 * cam_fd_hw_mgr_init() and passed to cam_node_init(); contexts are
 * initialized with &node->hw_mgr_intf before cam_node_init() runs —
 * presumably cam_node_init() copies hw_mgr_intf into the node; confirm.
 */
static int cam_fd_dev_probe(struct platform_device *pdev)
{
int rc;
int i;
struct cam_hw_mgr_intf hw_mgr_intf;
struct cam_node *node;
g_fd_dev.sd.internal_ops = &cam_fd_subdev_internal_ops;
/* Initialize the v4l2 subdevice first. (create cam_node) */
rc = cam_subdev_probe(&g_fd_dev.sd, pdev, CAM_FD_DEV_NAME,
CAM_FD_DEVICE_TYPE);
if (rc) {
CAM_ERR(CAM_FD, "FD cam_subdev_probe failed, rc=%d", rc);
return rc;
}
node = (struct cam_node *) g_fd_dev.sd.token;
rc = cam_fd_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
if (rc) {
CAM_ERR(CAM_FD, "Failed in initializing FD HW manager, rc=%d",
rc);
goto unregister_subdev;
}
for (i = 0; i < CAM_CTX_MAX; i++) {
rc = cam_fd_context_init(&g_fd_dev.fd_ctx[i],
&g_fd_dev.base_ctx[i], &node->hw_mgr_intf, i);
if (rc) {
CAM_ERR(CAM_FD, "FD context init failed i=%d, rc=%d",
i, rc);
goto deinit_ctx;
}
}
rc = cam_node_init(node, &hw_mgr_intf, g_fd_dev.base_ctx, CAM_CTX_MAX,
CAM_FD_DEV_NAME);
if (rc) {
CAM_ERR(CAM_FD, "FD node init failed, rc=%d", rc);
goto deinit_ctx;
}
mutex_init(&g_fd_dev.lock);
g_fd_dev.probe_done = true;
CAM_DBG(CAM_FD, "Camera FD probe complete");
return 0;
deinit_ctx:
/* Unwind only the contexts that were successfully initialized */
for (--i; i >= 0; i--) {
if (cam_fd_context_deinit(&g_fd_dev.fd_ctx[i]))
CAM_ERR(CAM_FD, "FD context %d deinit failed", i);
}
unregister_subdev:
if (cam_subdev_remove(&g_fd_dev.sd))
CAM_ERR(CAM_FD, "Failed in subdev remove");
return rc;
}
/*
 * Platform remove: tear down in reverse probe order — contexts, then the
 * FD HW manager, then the subdev/node. Failures are logged but removal
 * continues so that as much as possible is released.
 */
static int cam_fd_dev_remove(struct platform_device *pdev)
{
	int idx, rc;

	for (idx = 0; idx < CAM_CTX_MAX; idx++) {
		rc = cam_fd_context_deinit(&g_fd_dev.fd_ctx[idx]);
		if (rc)
			CAM_ERR(CAM_FD, "FD context %d deinit failed, rc=%d",
				idx, rc);
	}

	rc = cam_fd_hw_mgr_deinit(pdev->dev.of_node);
	if (rc)
		CAM_ERR(CAM_FD, "Failed in hw mgr deinit, rc=%d", rc);

	rc = cam_subdev_remove(&g_fd_dev.sd);
	if (rc)
		CAM_ERR(CAM_FD, "Unregister failed, rc=%d", rc);

	mutex_destroy(&g_fd_dev.lock);
	g_fd_dev.probe_done = false;

	return rc;
}
/* Device-tree match table for the FD platform device */
static const struct of_device_id cam_fd_dt_match[] = {
{
.compatible = "qcom,cam-fd"
},
{}
};

/* FD platform driver; bind/unbind is suppressed via sysfs */
static struct platform_driver cam_fd_driver = {
.probe = cam_fd_dev_probe,
.remove = cam_fd_dev_remove,
.driver = {
.name = "cam_fd",
.owner = THIS_MODULE,
.of_match_table = cam_fd_dt_match,
.suppress_bind_attrs = true,
},
};
/* Module entry point: register the FD platform driver */
static int __init cam_fd_dev_init_module(void)
{
	return platform_driver_register(&cam_fd_driver);
}
/* Module exit point: unregister the FD platform driver */
static void __exit cam_fd_dev_exit_module(void)
{
	platform_driver_unregister(&cam_fd_driver);
}
module_init(cam_fd_dev_init_module);
module_exit(cam_fd_dev_exit_module);
MODULE_DESCRIPTION("MSM FD driver");
MODULE_LICENSE("GPL v2");

View file

@ -0,0 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
# Build rules for the FD (face detection) HW manager layer.
# Header search paths into the sibling camera driver stack modules:
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd
ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw
ccflags-y += -Idrivers/media/platform/msm/camera
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
# Descend into the per-version HW layer, then build the manager itself.
obj-$(CONFIG_SPECTRA_CAMERA) += fd_hw/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_hw_mgr.o

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,179 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_FD_HW_MGR_H_
#define _CAM_FD_HW_MGR_H_
#include <linux/module.h>
#include <linux/kernel.h>
#include <media/cam_fd.h>
#include "cam_hw.h"
#include "cam_hw_intf.h"
#include "cam_cpas_api.h"
#include "cam_debug_util.h"
#include "cam_hw_mgr_intf.h"
#include "cam_req_mgr_workq.h"
#include "cam_fd_hw_intf.h"
#define CAM_FD_HW_MAX 1
#define CAM_FD_WORKQ_NUM_TASK 10
struct cam_fd_hw_mgr;
/**
* enum cam_fd_mgr_work_type - Type of worker task
*
* @CAM_FD_WORK_FRAME : Work type indicating frame task
* @CAM_FD_WORK_IRQ : Work type indicating irq task
*/
enum cam_fd_mgr_work_type {
CAM_FD_WORK_FRAME,
CAM_FD_WORK_IRQ,
};
/**
* struct cam_fd_hw_mgr_ctx : FD HW Mgr context
*
* @list : List pointer used to maintain this context
* in free, used list
* @ctx_index : Index of this context
* @ctx_in_use : Whether this context is in use
* @event_cb : Event callback pointer to notify cam core context
* @cb_priv : Event callback private pointer
* @hw_mgr : Pointer to hw manager
* @get_raw_results : Whether this context needs raw results
* @mode : Mode in which this context runs
* @device_index : HW Device used by this context
* @ctx_hw_private : HW layer's private context pointer for this context
* @priority : Priority of this context
*/
struct cam_fd_hw_mgr_ctx {
struct list_head list;
uint32_t ctx_index;
bool ctx_in_use;
cam_hw_event_cb_func event_cb;
void *cb_priv;
struct cam_fd_hw_mgr *hw_mgr;
bool get_raw_results;
enum cam_fd_hw_mode mode;
int32_t device_index;
void *ctx_hw_private;
uint32_t priority;
};
/**
* struct cam_fd_device : FD HW Device
*
* @hw_caps : This FD device's capabilities
* @hw_intf : FD device's interface information
* @ready_to_process : Whether this device is ready to process next frame
* @num_ctxts : Number of context currently running on this device
* @valid : Whether this device is valid
 * @lock                : Lock used for protection
* @cur_hw_ctx : current hw context running in the device
* @req_id : current processing req id
*/
struct cam_fd_device {
struct cam_fd_hw_caps hw_caps;
struct cam_hw_intf *hw_intf;
bool ready_to_process;
uint32_t num_ctxts;
bool valid;
struct mutex lock;
struct cam_fd_hw_mgr_ctx *cur_hw_ctx;
int64_t req_id;
};
/**
* struct cam_fd_mgr_frame_request : Frame request information maintained
* in HW Mgr layer
*
* @list : List pointer used to maintain this request in
* free, pending, processing request lists
* @request_id : Request ID corresponding to this request
* @hw_ctx : HW context from which this request is coming
* @hw_req_private : HW layer's private information specific to
* this request
* @hw_update_entries : HW update entries corresponding to this request
* which needs to be submitted to HW through CDM
* @num_hw_update_entries : Number of HW update entries
*/
struct cam_fd_mgr_frame_request {
struct list_head list;
uint64_t request_id;
struct cam_fd_hw_mgr_ctx *hw_ctx;
struct cam_fd_hw_req_private hw_req_private;
struct cam_hw_update_entry hw_update_entries[CAM_FD_MAX_HW_ENTRIES];
uint32_t num_hw_update_entries;
};
/**
* struct cam_fd_mgr_work_data : HW Mgr work data information
*
* @type : Type of work
* @irq_type : IRQ type when this work is queued because of irq callback
*/
struct cam_fd_mgr_work_data {
enum cam_fd_mgr_work_type type;
enum cam_fd_hw_irq_type irq_type;
};
/**
* struct cam_fd_hw_mgr : FD HW Mgr information
*
* @free_ctx_list : List of free contexts available for acquire
* @used_ctx_list : List of contexts that are acquired
* @frame_free_list : List of free frame requests available
* @frame_pending_list_high : List of high priority frame requests pending
* for processing
* @frame_pending_list_normal : List of normal priority frame requests pending
* for processing
* @frame_processing_list : List of frame requests currently being
* processed currently. Generally maximum one
* request would be present in this list
* @hw_mgr_mutex : Mutex to protect hw mgr data when accessed
* from multiple threads
* @hw_mgr_slock : Spin lock to protect hw mgr data when accessed
* from multiple threads
* @ctx_mutex : Mutex to protect context list
* @frame_req_mutex : Mutex to protect frame request list
* @device_iommu : Device IOMMU information
* @cdm_iommu : CDM IOMMU information
* @hw_device : Underlying HW device information
* @num_devices : Number of HW devices available
* @raw_results_available : Whether raw results available in this driver
* @supported_modes : Supported modes by this driver
* @ctx_pool : List of context
* @frame_req : List of frame requests
* @work : Worker handle
* @work_data : Worker data
* @fd_caps : FD driver capabilities
*/
struct cam_fd_hw_mgr {
struct list_head free_ctx_list;
struct list_head used_ctx_list;
struct list_head frame_free_list;
struct list_head frame_pending_list_high;
struct list_head frame_pending_list_normal;
struct list_head frame_processing_list;
struct mutex hw_mgr_mutex;
spinlock_t hw_mgr_slock;
struct mutex ctx_mutex;
struct mutex frame_req_mutex;
struct cam_iommu_handle device_iommu;
struct cam_iommu_handle cdm_iommu;
struct cam_fd_device hw_device[CAM_FD_HW_MAX];
uint32_t num_devices;
bool raw_results_available;
uint32_t supported_modes;
struct cam_fd_hw_mgr_ctx ctx_pool[CAM_CTX_MAX];
struct cam_fd_mgr_frame_request frame_req[CAM_CTX_REQ_MAX];
struct cam_req_mgr_core_workq *work;
struct cam_fd_mgr_work_data work_data[CAM_FD_WORKQ_NUM_TASK];
struct cam_fd_query_cap_cmd fd_caps;
};
#endif /* _CAM_FD_HW_MGR_H_ */

View file

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_FD_HW_MGR_INTF_H_
#define _CAM_FD_HW_MGR_INTF_H_
#include <linux/of.h>
#include "cam_debug_util.h"
#include "cam_hw_mgr_intf.h"
/*
 * cam_fd_hw_mgr_init() - One-time initialization of the FD HW manager.
 * @of_node     : FD device DT node handed down from the device probe
 * @hw_mgr_intf : Filled in with the hw-mgr callbacks used by cam core
 */
int cam_fd_hw_mgr_init(struct device_node *of_node,
	struct cam_hw_mgr_intf *hw_mgr_intf);

/* Tear down the FD HW manager set up by cam_fd_hw_mgr_init(). */
int cam_fd_hw_mgr_deinit(struct device_node *of_node);

#endif /* _CAM_FD_HW_MGR_INTF_H_ */

View file

@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
# Build rules for the per-version FD HW layer (core/dev/soc).
# Header search paths into the sibling camera driver stack modules:
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd
ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw
ccflags-y += -Idrivers/media/platform/msm/camera
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_hw_dev.o cam_fd_hw_core.o cam_fd_hw_soc.o

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,237 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_FD_HW_CORE_H_
#define _CAM_FD_HW_CORE_H_
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <media/cam_defs.h>
#include <media/cam_fd.h>
#include "cam_common_util.h"
#include "cam_debug_util.h"
#include "cam_io_util.h"
#include "cam_cpas_api.h"
#include "cam_cdm_intf_api.h"
#include "cam_fd_hw_intf.h"
#include "cam_fd_hw_soc.h"
#define CAM_FD_IRQ_TO_MASK(irq) (1 << (irq))
#define CAM_FD_MASK_TO_IRQ(mask, irq) ((mask) >> (irq))
#define CAM_FD_HW_HALT_RESET_TIMEOUT 750
/**
* enum cam_fd_core_state - FD Core internal states
*
* @CAM_FD_CORE_STATE_POWERDOWN : Indicates FD core is powered down
* @CAM_FD_CORE_STATE_IDLE : Indicates FD HW is in idle state.
* Core can be in this state when it is
* ready to process frames or when
* processing is finished and results are
* available
* @CAM_FD_CORE_STATE_PROCESSING : Indicates FD core is processing frame
* @CAM_FD_CORE_STATE_READING_RESULTS : Indicates results are being read from
* FD core
* @CAM_FD_CORE_STATE_RESET_PROGRESS : Indicates FD Core is in reset state
*/
enum cam_fd_core_state {
CAM_FD_CORE_STATE_POWERDOWN,
CAM_FD_CORE_STATE_IDLE,
CAM_FD_CORE_STATE_PROCESSING,
CAM_FD_CORE_STATE_READING_RESULTS,
CAM_FD_CORE_STATE_RESET_PROGRESS,
};
/**
* struct cam_fd_ctx_hw_private : HW private information for a specific hw ctx.
* This information is populated by HW layer on
* reserve() and given back to HW Mgr as private
* data for the hw context. This private_data
* has to be passed by HW Mgr layer while
* further HW layer calls
*
* @hw_ctx : Corresponding hw_ctx pointer
* @fd_hw : FD HW info pointer
* @cdm_handle : CDM Handle for this context
* @cdm_ops : CDM Ops
* @cdm_cmd : CDM command pointer
* @mode : Mode this context is running
* @curr_req_private : Current Request information
*
*/
struct cam_fd_ctx_hw_private {
void *hw_ctx;
struct cam_hw_info *fd_hw;
uint32_t cdm_handle;
struct cam_cdm_utils_ops *cdm_ops;
struct cam_cdm_bl_request *cdm_cmd;
enum cam_fd_hw_mode mode;
struct cam_fd_hw_req_private *curr_req_private;
};
/**
* struct cam_fd_core_regs : FD HW Core register offsets info
*
* @version : Offset of version register
* @control : Offset of control register
* @result_cnt : Offset of result count register
* @result_addr : Offset of results address register
* @image_addr : Offset of image address register
* @work_addr : Offset of work address register
* @ro_mode : Offset of ro_mode register
* @results_reg_base : Offset of results_reg_base register
* @raw_results_reg_base : Offset of raw_results_reg_base register
*
*/
struct cam_fd_core_regs {
uint32_t version;
uint32_t control;
uint32_t result_cnt;
uint32_t result_addr;
uint32_t image_addr;
uint32_t work_addr;
uint32_t ro_mode;
uint32_t results_reg_base;
uint32_t raw_results_reg_base;
};
/**
 * struct cam_fd_wrapper_regs : FD HW Wrapper register offsets info
*
* @wrapper_version : Offset of wrapper_version register
* @cgc_disable : Offset of cgc_disable register
* @hw_stop : Offset of hw_stop register
* @sw_reset : Offset of sw_reset register
* @vbif_req_priority : Offset of vbif_req_priority register
* @vbif_priority_level : Offset of vbif_priority_level register
* @vbif_done_status : Offset of vbif_done_status register
* @irq_mask : Offset of irq mask register
* @irq_status : Offset of irq status register
* @irq_clear : Offset of irq clear register
*
*/
struct cam_fd_wrapper_regs {
uint32_t wrapper_version;
uint32_t cgc_disable;
uint32_t hw_stop;
uint32_t sw_reset;
uint32_t vbif_req_priority;
uint32_t vbif_priority_level;
uint32_t vbif_done_status;
uint32_t irq_mask;
uint32_t irq_status;
uint32_t irq_clear;
};
/**
 * struct cam_fd_hw_errata_wa : FD HW Errata workaround enable/disable info
*
* @single_irq_only : Whether to enable only one irq at any time
* @ro_mode_enable_always : Whether to enable ro mode always
* @ro_mode_results_invalid : Whether results written directly into output
* memory by HW are valid or not
*/
struct cam_fd_hw_errata_wa {
bool single_irq_only;
bool ro_mode_enable_always;
bool ro_mode_results_invalid;
};
/**
* struct cam_fd_hw_results_prop : FD HW Results properties
*
* @max_faces : Maximum number of faces supported
* @per_face_entries : Number of register with properties for each face
* @raw_results_entries : Number of raw results entries for the full search
* @raw_results_available : Whether raw results available on this HW
*
*/
struct cam_fd_hw_results_prop {
uint32_t max_faces;
uint32_t per_face_entries;
uint32_t raw_results_entries;
bool raw_results_available;
};
/**
* struct cam_fd_hw_static_info : FD HW information based on HW version
*
* @core_version : Core version of FD HW
* @wrapper_version : Wrapper version of FD HW
* @core_regs : Register offset information for core registers
* @wrapper_regs : Register offset information for wrapper registers
* @results : Information about results available on this HW
* @enable_errata_wa : Errata workaround information
* @irq_mask : IRQ mask to enable
* @qos_priority : QoS priority setting for this chipset
* @qos_priority_level : QoS priority level setting for this chipset
* @supported_modes : Supported HW modes on this HW version
* @ro_mode_supported : Whether RO mode is supported on this HW
*
*/
struct cam_fd_hw_static_info {
struct cam_hw_version core_version;
struct cam_hw_version wrapper_version;
struct cam_fd_core_regs core_regs;
struct cam_fd_wrapper_regs wrapper_regs;
struct cam_fd_hw_results_prop results;
struct cam_fd_hw_errata_wa enable_errata_wa;
uint32_t irq_mask;
uint32_t qos_priority;
uint32_t qos_priority_level;
uint32_t supported_modes;
bool ro_mode_supported;
};
/**
* struct cam_fd_core : FD HW core data structure
*
* @hw_static_info : HW information specific to version
* @hw_caps : HW capabilities
* @core_state : Current HW state
* @processing_complete : Whether processing is complete
* @reset_complete : Whether reset is complete
* @halt_complete : Whether halt is complete
* @hw_req_private : Request that is being currently processed by HW
* @results_valid : Whether HW frame results are available to get
* @spin_lock : Mutex to protect shared data in hw layer
* @irq_cb : HW Manager callback information
*
*/
struct cam_fd_core {
struct cam_fd_hw_static_info *hw_static_info;
struct cam_fd_hw_caps hw_caps;
enum cam_fd_core_state core_state;
struct completion processing_complete;
struct completion reset_complete;
struct completion halt_complete;
struct cam_fd_hw_req_private *hw_req_private;
bool results_valid;
spinlock_t spin_lock;
struct cam_fd_hw_cmd_set_irq_cb irq_cb;
};
/* Fill @hw_caps for the given FD HW instance (HW must be powered up). */
int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw,
	struct cam_fd_hw_caps *hw_caps);

/* Top-half handler registered for the FD interrupt line; @data is the
 * cam_hw_info pointer passed at request_irq time.
 */
irqreturn_t cam_fd_hw_irq(int irq_num, void *data);

/*
 * cam_hw_ops implementations exposed through cam_hw_intf. Each takes the
 * cam_hw_info pointer as @hw_priv plus an op-specific argument struct
 * whose size is passed in @arg_size for validation.
 */
int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
	uint32_t arg_size);
int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size);
int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size);
int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size);
int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size);
int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size);
int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size);
int cam_fd_hw_read(void *hw_priv, void *read_args, uint32_t arg_size);
int cam_fd_hw_write(void *hw_priv, void *write_args, uint32_t arg_size);
int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
	void *cmd_args, uint32_t arg_size);
#endif /* _CAM_FD_HW_CORE_H_ */

View file

@ -0,0 +1,234 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include "cam_subdev.h"
#include "cam_fd_hw_intf.h"
#include "cam_fd_hw_core.h"
#include "cam_fd_hw_soc.h"
#include "cam_fd_hw_v41.h"
#include "cam_fd_hw_v501.h"
static char fd_dev_name[8];
static int cam_fd_hw_dev_probe(struct platform_device *pdev)
{
struct cam_hw_info *fd_hw;
struct cam_hw_intf *fd_hw_intf;
struct cam_fd_core *fd_core;
const struct of_device_id *match_dev = NULL;
struct cam_fd_hw_static_info *hw_static_info = NULL;
int rc = 0;
uint32_t hw_idx;
struct cam_fd_hw_init_args init_args;
struct cam_fd_hw_deinit_args deinit_args;
fd_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
if (!fd_hw_intf)
return -ENOMEM;
fd_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
if (!fd_hw) {
kfree(fd_hw_intf);
return -ENOMEM;
}
fd_core = kzalloc(sizeof(struct cam_fd_core), GFP_KERNEL);
if (!fd_core) {
kfree(fd_hw);
kfree(fd_hw_intf);
return -ENOMEM;
}
of_property_read_u32(pdev->dev.of_node,
"cell-index", &hw_idx);
fd_hw_intf->hw_priv = fd_hw;
fd_hw->core_info = fd_core;
fd_hw_intf->hw_idx = hw_idx;
memset(fd_dev_name, 0, sizeof(fd_dev_name));
snprintf(fd_dev_name, sizeof(fd_dev_name),
"fd%1u", fd_hw_intf->hw_idx);
fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
fd_hw->soc_info.pdev = pdev;
fd_hw->soc_info.dev = &pdev->dev;
fd_hw->soc_info.dev_name = fd_dev_name;
fd_hw->open_count = 0;
mutex_init(&fd_hw->hw_mutex);
spin_lock_init(&fd_hw->hw_lock);
init_completion(&fd_hw->hw_complete);
spin_lock_init(&fd_core->spin_lock);
init_completion(&fd_core->processing_complete);
init_completion(&fd_core->halt_complete);
init_completion(&fd_core->reset_complete);
fd_hw_intf->hw_ops.get_hw_caps = cam_fd_hw_get_hw_caps;
fd_hw_intf->hw_ops.init = cam_fd_hw_init;
fd_hw_intf->hw_ops.deinit = cam_fd_hw_deinit;
fd_hw_intf->hw_ops.reset = cam_fd_hw_reset;
fd_hw_intf->hw_ops.reserve = cam_fd_hw_reserve;
fd_hw_intf->hw_ops.release = cam_fd_hw_release;
fd_hw_intf->hw_ops.start = cam_fd_hw_start;
fd_hw_intf->hw_ops.stop = cam_fd_hw_halt_reset;
fd_hw_intf->hw_ops.read = NULL;
fd_hw_intf->hw_ops.write = NULL;
fd_hw_intf->hw_ops.process_cmd = cam_fd_hw_process_cmd;
fd_hw_intf->hw_type = CAM_HW_FD;
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev || !match_dev->data) {
CAM_ERR(CAM_FD, "No Of_match data, %pK", match_dev);
rc = -EINVAL;
goto free_memory;
}
hw_static_info = (struct cam_fd_hw_static_info *)match_dev->data;
fd_core->hw_static_info = hw_static_info;
CAM_DBG(CAM_FD, "HW Static Info : version core[%d.%d] wrapper[%d.%d]",
hw_static_info->core_version.major,
hw_static_info->core_version.minor,
hw_static_info->wrapper_version.major,
hw_static_info->wrapper_version.minor);
rc = cam_fd_soc_init_resources(&fd_hw->soc_info, cam_fd_hw_irq, fd_hw);
if (rc) {
CAM_ERR(CAM_FD, "Failed to init soc, rc=%d", rc);
goto free_memory;
}
memset(&init_args, 0x0, sizeof(init_args));
memset(&deinit_args, 0x0, sizeof(deinit_args));
rc = cam_fd_hw_init(fd_hw, &init_args, sizeof(init_args));
if (rc) {
CAM_ERR(CAM_FD, "Failed to hw init, rc=%d", rc);
goto deinit_platform_res;
}
rc = cam_fd_hw_util_get_hw_caps(fd_hw, &fd_core->hw_caps);
if (rc) {
CAM_ERR(CAM_FD, "Failed to get hw caps, rc=%d", rc);
goto deinit_hw;
}
rc = cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args));
if (rc) {
CAM_ERR(CAM_FD, "Failed to deinit hw, rc=%d", rc);
goto deinit_platform_res;
}
platform_set_drvdata(pdev, fd_hw_intf);
CAM_DBG(CAM_FD, "FD-%d probe successful", fd_hw_intf->hw_idx);
return rc;
deinit_hw:
if (cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args)))
CAM_ERR(CAM_FD, "Failed in hw deinit");
deinit_platform_res:
if (cam_fd_soc_deinit_resources(&fd_hw->soc_info))
CAM_ERR(CAM_FD, "Failed in soc deinit");
mutex_destroy(&fd_hw->hw_mutex);
free_memory:
kfree(fd_hw);
kfree(fd_hw_intf);
kfree(fd_core);
return rc;
}
/*
 * cam_fd_hw_dev_remove() - Platform remove for one FD HW core.
 *
 * Releases the objects allocated in probe. The cleanup labels fall
 * through intentionally: freeing fd_core continues into SOC deinit and
 * then into freeing fd_hw_intf.
 *
 * NOTE(review): when fd_core is NULL the -EINVAL set here is overwritten
 * by the cam_fd_soc_deinit_resources() result below — confirm intended.
 */
static int cam_fd_hw_dev_remove(struct platform_device *pdev)
{
	int rc = 0;
	struct cam_hw_intf *fd_hw_intf;
	struct cam_hw_info *fd_hw;
	struct cam_fd_core *fd_core;

	fd_hw_intf = platform_get_drvdata(pdev);
	if (!fd_hw_intf) {
		CAM_ERR(CAM_FD, "Invalid fd_hw_intf from pdev");
		return -EINVAL;
	}

	fd_hw = fd_hw_intf->hw_priv;
	if (!fd_hw) {
		CAM_ERR(CAM_FD, "Invalid fd_hw from fd_hw_intf");
		rc = -ENODEV;
		goto free_fd_hw_intf;
	}

	fd_core = (struct cam_fd_core *)fd_hw->core_info;
	if (!fd_core) {
		CAM_ERR(CAM_FD, "Invalid fd_core from fd_hw");
		rc = -EINVAL;
		goto deinit_platform_res;
	}

	kfree(fd_core);

deinit_platform_res:
	rc = cam_fd_soc_deinit_resources(&fd_hw->soc_info);
	if (rc)
		CAM_ERR(CAM_FD, "Error in FD soc deinit, rc=%d", rc);

	mutex_destroy(&fd_hw->hw_mutex);
	kfree(fd_hw);

free_fd_hw_intf:
	kfree(fd_hw_intf);

	return rc;
}
/*
 * Device-tree match table for supported FD HW versions; .data points at
 * the static info describing each core/wrapper combination.
 * Fix vs. original: the "qcom,fd501" entry was listed twice.
 */
static const struct of_device_id cam_fd_hw_dt_match[] = {
	{
		.compatible = "qcom,fd41",
		.data = &cam_fd_wrapper120_core410_info,
	},
	{
		.compatible = "qcom,fd501",
		.data = &cam_fd_wrapper200_core501_info,
	},
	{}
};
MODULE_DEVICE_TABLE(of, cam_fd_hw_dt_match);
/*
 * Platform driver for an individual FD HW core instance.
 * suppress_bind_attrs prevents manual unbind/bind through sysfs.
 */
static struct platform_driver cam_fd_hw_driver = {
	.probe = cam_fd_hw_dev_probe,
	.remove = cam_fd_hw_dev_remove,
	.driver = {
		.name = "cam_fd_hw",
		.owner = THIS_MODULE,
		.of_match_table = cam_fd_hw_dt_match,
		.suppress_bind_attrs = true,
	},
};
/* Module entry point: register the FD HW platform driver */
static int __init cam_fd_hw_init_module(void)
{
	return platform_driver_register(&cam_fd_hw_driver);
}

/* Module exit point: unregister the FD HW platform driver */
static void __exit cam_fd_hw_exit_module(void)
{
	platform_driver_unregister(&cam_fd_hw_driver);
}
module_init(cam_fd_hw_init_module);
module_exit(cam_fd_hw_exit_module);
MODULE_DESCRIPTION("CAM FD HW driver");
MODULE_LICENSE("GPL v2");

View file

@ -0,0 +1,282 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_FD_HW_INTF_H_
#define _CAM_FD_HW_INTF_H_
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <media/cam_cpas.h>
#include <media/cam_req_mgr.h>
#include <media/cam_fd.h>
#include "cam_io_util.h"
#include "cam_soc_util.h"
#include "cam_hw.h"
#include "cam_hw_intf.h"
#include "cam_subdev.h"
#include "cam_cpas_api.h"
#include "cam_hw_mgr_intf.h"
#include "cam_debug_util.h"
#define CAM_FD_MAX_IO_BUFFERS 5
#define CAM_FD_MAX_HW_ENTRIES 5
/**
* enum cam_fd_hw_type - Enum for FD HW type
*
* @CAM_HW_FD : FaceDetection HW type
*/
enum cam_fd_hw_type {
CAM_HW_FD,
};
/**
* enum cam_fd_hw_mode - Mode in which HW can run
*
* @CAM_FD_MODE_FACEDETECTION : Face Detection mode in which face search
* is done on the given frame
* @CAM_FD_MODE_PYRAMID : Pyramid mode where a pyramid image is generated
* from an input image
*/
enum cam_fd_hw_mode {
CAM_FD_MODE_FACEDETECTION = 0x1,
CAM_FD_MODE_PYRAMID = 0x2,
};
/**
* enum cam_fd_priority - FD priority levels
*
* @CAM_FD_PRIORITY_HIGH : Indicates high priority client, driver prioritizes
* frame requests coming from contexts with HIGH
* priority compared to context with normal priority
* @CAM_FD_PRIORITY_NORMAL : Indicates normal priority client
*/
enum cam_fd_priority {
CAM_FD_PRIORITY_HIGH = 0x0,
CAM_FD_PRIORITY_NORMAL,
};
/**
* enum cam_fd_hw_irq_type - FD HW IRQ types
*
* @CAM_FD_IRQ_FRAME_DONE : Indicates frame processing is finished
* @CAM_FD_IRQ_HALT_DONE : Indicates HW halt is finished
* @CAM_FD_IRQ_RESET_DONE : Indicates HW reset is finished
*/
enum cam_fd_hw_irq_type {
CAM_FD_IRQ_FRAME_DONE,
CAM_FD_IRQ_HALT_DONE,
CAM_FD_IRQ_RESET_DONE,
};
/**
* enum cam_fd_hw_cmd_type - FD HW layer custom commands
*
* @CAM_FD_HW_CMD_PRESTART : Command to process pre-start settings
* @CAM_FD_HW_CMD_FRAME_DONE : Command to process frame done settings
* @CAM_FD_HW_CMD_UPDATE_SOC : Command to process soc update
* @CAM_FD_HW_CMD_REGISTER_CALLBACK : Command to set hw mgr callback
* @CAM_FD_HW_CMD_MAX : Indicates max cmd
*/
enum cam_fd_hw_cmd_type {
CAM_FD_HW_CMD_PRESTART,
CAM_FD_HW_CMD_FRAME_DONE,
CAM_FD_HW_CMD_UPDATE_SOC,
CAM_FD_HW_CMD_REGISTER_CALLBACK,
CAM_FD_HW_CMD_MAX,
};
/**
* struct cam_fd_hw_io_buffer : FD HW IO Buffer information
*
* @valid : Whether this IO Buf configuration is valid
* @io_cfg : IO Configuration information
* @num_buf : Number planes in io_addr, cpu_addr array
* @io_addr : Array of IO address information for planes
* @cpu_addr : Array of CPU address information for planes
*/
struct cam_fd_hw_io_buffer {
bool valid;
struct cam_buf_io_cfg *io_cfg;
uint32_t num_buf;
uint64_t io_addr[CAM_PACKET_MAX_PLANES];
uintptr_t cpu_addr[CAM_PACKET_MAX_PLANES];
};
/**
* struct cam_fd_hw_req_private : FD HW layer's private information
* specific to a request
*
* @ctx_hw_private : FD HW layer's ctx specific private data
* @request_id : Request ID corresponding to this private information
* @get_raw_results : Whether to get raw results for this request
* @ro_mode_enabled : Whether RO mode is enabled for this request
* @fd_results : Pointer to save face detection results
* @raw_results : Pointer to save face detection raw results
*/
struct cam_fd_hw_req_private {
void *ctx_hw_private;
uint64_t request_id;
bool get_raw_results;
bool ro_mode_enabled;
struct cam_fd_results *fd_results;
uint32_t *raw_results;
};
/**
* struct cam_fd_hw_reserve_args : Reserve args for this HW context
*
* @hw_ctx : HW context for which reserve is requested
* @mode : Mode for which this reserve is requested
* @ctx_hw_private : Pointer to save HW layer's private information specific
* to this hw context. This has to be passed while calling
* further HW layer calls
*/
struct cam_fd_hw_reserve_args {
void *hw_ctx;
enum cam_fd_hw_mode mode;
void *ctx_hw_private;
};
/**
* struct cam_fd_hw_release_args : Release args for this HW context
*
* @hw_ctx : HW context for which release is requested
* @ctx_hw_private : HW layer's private information specific to this hw context
*/
struct cam_fd_hw_release_args {
void *hw_ctx;
void *ctx_hw_private;
};
/**
* struct cam_fd_hw_init_args : Init args for this HW context
*
* @hw_ctx : HW context for which init is requested
* @ctx_hw_private : HW layer's private information specific to this hw context
*/
struct cam_fd_hw_init_args {
void *hw_ctx;
void *ctx_hw_private;
};
/**
* struct cam_fd_hw_deinit_args : Deinit args for this HW context
*
* @hw_ctx : HW context for which deinit is requested
* @ctx_hw_private : HW layer's private information specific to this hw context
*/
struct cam_fd_hw_deinit_args {
void *hw_ctx;
void *ctx_hw_private;
};
/**
* struct cam_fd_hw_cmd_prestart_args : Prestart command args
*
* @hw_ctx : HW context which submitted this prestart
* @ctx_hw_private : HW layer's private information specific to
* this hw context
* @request_id : Request ID corresponds to this pre-start command
* @get_raw_results : Whether to get raw results for this request
* @input_buf : Input IO Buffer information for this request
* @output_buf : Output IO Buffer information for this request
* @cmd_buf_addr : Command buffer address to fill kmd commands
* @size : Size available in command buffer
* @pre_config_buf_size : Buffer size filled with commands by KMD that has
* to be inserted before umd commands
* @post_config_buf_size : Buffer size filled with commands by KMD that has
* to be inserted after umd commands
* @hw_req_private : HW layer's private information specific to
* this request
*/
struct cam_fd_hw_cmd_prestart_args {
void *hw_ctx;
void *ctx_hw_private;
uint64_t request_id;
bool get_raw_results;
struct cam_fd_hw_io_buffer input_buf[CAM_FD_MAX_IO_BUFFERS];
struct cam_fd_hw_io_buffer output_buf[CAM_FD_MAX_IO_BUFFERS];
uint32_t *cmd_buf_addr;
uint32_t size;
uint32_t pre_config_buf_size;
uint32_t post_config_buf_size;
struct cam_fd_hw_req_private hw_req_private;
};
/**
* struct cam_fd_hw_cmd_start_args : Start command args
*
* @hw_ctx : HW context which submitting start command
* @ctx_hw_private : HW layer's private information specific to
* this hw context
* @hw_req_private : HW layer's private information specific to
* this request
* @hw_update_entries : HW update entries corresponds to this request
* @num_hw_update_entries : Number of hw update entries
*/
struct cam_fd_hw_cmd_start_args {
void *hw_ctx;
void *ctx_hw_private;
struct cam_fd_hw_req_private *hw_req_private;
struct cam_hw_update_entry *hw_update_entries;
uint32_t num_hw_update_entries;
};
/**
* struct cam_fd_hw_stop_args : Stop command args
*
* @hw_ctx : HW context which submitting stop command
* @ctx_hw_private : HW layer's private information specific to this hw context
* @request_id : Request ID that need to be stopped
* @hw_req_private : HW layer's private information specific to this request
*/
struct cam_fd_hw_stop_args {
void *hw_ctx;
void *ctx_hw_private;
uint64_t request_id;
struct cam_fd_hw_req_private *hw_req_private;
};
/**
* struct cam_fd_hw_frame_done_args : Frame done command args
*
* @hw_ctx : HW context which submitting frame done request
* @ctx_hw_private : HW layer's private information specific to this hw context
* @request_id : Request ID that need to be stopped
* @hw_req_private : HW layer's private information specific to this request
*/
struct cam_fd_hw_frame_done_args {
void *hw_ctx;
void *ctx_hw_private;
uint64_t request_id;
struct cam_fd_hw_req_private *hw_req_private;
};
/**
* struct cam_fd_hw_reset_args : Reset command args
*
* @hw_ctx : HW context which submitting reset command
* @ctx_hw_private : HW layer's private information specific to this hw context
*/
struct cam_fd_hw_reset_args {
void *hw_ctx;
void *ctx_hw_private;
};
/**
* struct cam_fd_hw_cmd_set_irq_cb : Set IRQ callback command args
*
* @cam_fd_hw_mgr_cb : HW Mgr's callback pointer
* @data : HW Mgr's private data
*/
struct cam_fd_hw_cmd_set_irq_cb {
int (*cam_fd_hw_mgr_cb)(void *data, enum cam_fd_hw_irq_type irq_type);
void *data;
};
#endif /* _CAM_FD_HW_INTF_H_ */

View file

@ -0,0 +1,283 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include "cam_fd_hw_core.h"
#include "cam_fd_hw_soc.h"
/*
 * CPAS IRQ callback for FD. Only logs the notification; the IRQ is never
 * consumed here, so CPAS continues its own handling (returns false).
 */
static bool cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata,
	struct cam_cpas_irq_data *irq_data)
{
	if (irq_data)
		CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, irq_type=%d",
			handle, userdata, irq_data->irq_type);

	return false;
}
/*
 * cam_fd_hw_soc_util_setup_regbase_indices() - Resolve named register
 * blocks ("fd_core", "fd_wrapper") from the SOC mem-block list into
 * indices of soc_info->reg_map, cached in soc_private->regbase_index[].
 *
 * Both blocks are mandatory and the mem-block count must be exactly
 * CAM_FD_REG_MAX; otherwise -EINVAL is returned.
 */
static int cam_fd_hw_soc_util_setup_regbase_indices(
	struct cam_hw_soc_info *soc_info)
{
	struct cam_fd_soc_private *soc_private =
		(struct cam_fd_soc_private *)soc_info->soc_private;
	uint32_t index;
	int rc, i;

	/* Mark all slots unresolved before the lookups below */
	for (i = 0; i < CAM_FD_REG_MAX; i++)
		soc_private->regbase_index[i] = -1;

	if ((soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) ||
		(soc_info->num_mem_block != CAM_FD_REG_MAX)) {
		CAM_ERR(CAM_FD, "Invalid num_mem_block=%d",
			soc_info->num_mem_block);
		return -EINVAL;
	}

	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
		soc_info->num_mem_block, "fd_core", &index);
	if ((rc == 0) && (index < CAM_FD_REG_MAX)) {
		soc_private->regbase_index[CAM_FD_REG_CORE] = index;
	} else {
		CAM_ERR(CAM_FD, "regbase not found for FD_CORE, rc=%d, %d %d",
			rc, index, CAM_FD_REG_MAX);
		return -EINVAL;
	}

	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
		soc_info->num_mem_block, "fd_wrapper", &index);
	if ((rc == 0) && (index < CAM_FD_REG_MAX)) {
		soc_private->regbase_index[CAM_FD_REG_WRAPPER] = index;
	} else {
		CAM_ERR(CAM_FD, "regbase not found FD_WRAPPER, rc=%d, %d %d",
			rc, index, CAM_FD_REG_MAX);
		return -EINVAL;
	}

	CAM_DBG(CAM_FD, "Reg indices : CORE=%d, WRAPPER=%d",
		soc_private->regbase_index[CAM_FD_REG_CORE],
		soc_private->regbase_index[CAM_FD_REG_WRAPPER]);

	return 0;
}
/*
 * cam_fd_soc_set_clk_flags() - Clear mem-core and mem-periphery retention
 * flags (NORETAIN_MEM / NORETAIN_PERIPH) on the FD core clocks.
 *
 * Fix vs. original: the return value of the NORETAIN_PERIPH call was
 * dropped, so the subsequent error check re-tested the stale
 * NORETAIN_MEM result and PERIPH failures went unreported.
 *
 * Return: 0 on success, or the last cam_soc_util_set_clk_flags() error.
 */
static int cam_fd_soc_set_clk_flags(struct cam_hw_soc_info *soc_info)
{
	int i, rc = 0;

	if (soc_info->num_clk > CAM_SOC_MAX_CLK) {
		CAM_ERR(CAM_FD, "Invalid num clk %d", soc_info->num_clk);
		return -EINVAL;
	}

	/* set memcore and mem periphery logic flags to 0 */
	for (i = 0; i < soc_info->num_clk; i++) {
		if ((strcmp(soc_info->clk_name[i], "fd_core_clk") == 0) ||
			(strcmp(soc_info->clk_name[i], "fd_core_uar_clk") ==
			0)) {
			rc = cam_soc_util_set_clk_flags(soc_info, i,
				CLKFLAG_NORETAIN_MEM);
			if (rc)
				CAM_ERR(CAM_FD,
					"Failed in NORETAIN_MEM i=%d, rc=%d",
					i, rc);

			rc = cam_soc_util_set_clk_flags(soc_info, i,
				CLKFLAG_NORETAIN_PERIPH);
			if (rc)
				CAM_ERR(CAM_FD,
					"Failed in NORETAIN_PERIPH i=%d, rc=%d",
					i, rc);
		}
	}

	return rc;
}
/*
 * cam_fd_soc_register_write() - write @reg_value at @reg_offset within the
 * FD register space selected by @reg_base (write with memory barrier).
 */
void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info,
	enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value)
{
	struct cam_fd_soc_private *soc_private = soc_info->soc_private;
	int32_t idx = soc_private->regbase_index[reg_base];

	CAM_DBG(CAM_FD, "FD_REG_WRITE: Base[%d] Offset[0x%8x] Value[0x%8x]",
		reg_base, reg_offset, reg_value);

	cam_io_w_mb(reg_value, soc_info->reg_map[idx].mem_base + reg_offset);
}
/*
 * cam_fd_soc_register_read() - read the FD register at @reg_offset within
 * the register space selected by @reg_base (read with memory barrier).
 *
 * Returns the register value.
 */
uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info,
	enum cam_fd_reg_base reg_base, uint32_t reg_offset)
{
	struct cam_fd_soc_private *soc_private = soc_info->soc_private;
	int32_t idx = soc_private->regbase_index[reg_base];
	uint32_t val;

	val = cam_io_r_mb(soc_info->reg_map[idx].mem_base + reg_offset);

	CAM_DBG(CAM_FD, "FD_REG_READ: Base[%d] Offset[0x%8x] Value[0x%8x]",
		reg_base, reg_offset, val);

	return val;
}
/*
 * cam_fd_soc_enable_resources() - power up the FD block.
 *
 * Votes CPAS bandwidth (AHB at SVS level plus a fixed AXI vote) and then
 * enables the platform resources (clocks/regulators/irq) at SVS level.
 * If the platform enable fails, the CPAS vote is released again.
 *
 * NOTE(review): a CPAS start failure returns -EFAULT instead of the
 * underlying rc — confirm callers do not need the original error code.
 *
 * Returns 0 on success or a negative error code.
 */
int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info)
{
	struct cam_fd_soc_private *soc_private = soc_info->soc_private;
	struct cam_ahb_vote ahb_vote;
	struct cam_axi_vote axi_vote;
	int rc;

	ahb_vote.type = CAM_VOTE_ABSOLUTE;
	ahb_vote.vote.level = CAM_SVS_VOTE;
	axi_vote.compressed_bw = 7200000;
	axi_vote.uncompressed_bw = 7200000;
	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
	if (rc) {
		CAM_ERR(CAM_FD, "Error in CPAS START, rc=%d", rc);
		return -EFAULT;
	}

	rc = cam_soc_util_enable_platform_resource(soc_info, true, CAM_SVS_VOTE,
		true);
	if (rc) {
		CAM_ERR(CAM_FD, "Error enable platform failed, rc=%d", rc);
		goto stop_cpas;
	}

	return rc;

stop_cpas:
	/* Best-effort unwind of the CPAS vote taken above */
	if (cam_cpas_stop(soc_private->cpas_handle))
		CAM_ERR(CAM_FD, "Error in CPAS STOP");

	return rc;
}
/*
 * cam_fd_soc_disable_resources() - power down the FD block.
 *
 * Disables the platform resources and then removes the CPAS bandwidth
 * vote taken in cam_fd_soc_enable_resources().
 *
 * Returns 0 on success or a negative error code.
 */
int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info)
{
	struct cam_fd_soc_private *soc_private;
	int rc;

	if (!soc_info) {
		CAM_ERR(CAM_FD, "Invalid soc_info param");
		return -EINVAL;
	}

	soc_private = soc_info->soc_private;

	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
	if (rc) {
		CAM_ERR(CAM_FD, "disable platform resources failed, rc=%d", rc);
		return rc;
	}

	rc = cam_cpas_stop(soc_private->cpas_handle);
	if (rc)
		CAM_ERR(CAM_FD, "Error in CPAS STOP, handle=0x%x, rc=%d",
			soc_private->cpas_handle, rc);

	return rc;
}
/*
 * cam_fd_soc_init_resources() - one-time SOC setup for the FD device.
 *
 * Parses DT properties, requests platform resources (registering
 * @irq_handler with @private_data), configures clock retention flags,
 * allocates the FD private SOC data, resolves register-base indices and
 * registers with CPAS. On any failure, everything acquired so far is
 * released via the goto-unwind labels.
 *
 * Returns 0 on success or a negative error code.
 */
int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t irq_handler, void *private_data)
{
	struct cam_fd_soc_private *soc_private;
	struct cam_cpas_register_params cpas_register_param;
	int rc;

	rc = cam_soc_util_get_dt_properties(soc_info);
	if (rc) {
		CAM_ERR(CAM_FD, "Failed in get_dt_properties, rc=%d", rc);
		return rc;
	}

	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
		private_data);
	if (rc) {
		CAM_ERR(CAM_FD, "Failed in request_platform_resource rc=%d",
			rc);
		return rc;
	}

	rc = cam_fd_soc_set_clk_flags(soc_info);
	if (rc) {
		CAM_ERR(CAM_FD, "failed in set_clk_flags rc=%d", rc);
		goto release_res;
	}

	/* Freed in cam_fd_soc_deinit_resources() (or on error below) */
	soc_private = kzalloc(sizeof(struct cam_fd_soc_private), GFP_KERNEL);
	if (!soc_private) {
		rc = -ENOMEM;
		goto release_res;
	}
	soc_info->soc_private = soc_private;

	rc = cam_fd_hw_soc_util_setup_regbase_indices(soc_info);
	if (rc) {
		CAM_ERR(CAM_FD, "Failed in setup regbase, rc=%d", rc);
		goto free_soc_private;
	}

	/* Register as a CPAS client; IRQs are reported via the callback */
	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
	strlcpy(cpas_register_param.identifier, "fd", CAM_HW_IDENTIFIER_LENGTH);
	cpas_register_param.cell_index = soc_info->index;
	cpas_register_param.dev = &soc_info->pdev->dev;
	cpas_register_param.userdata = private_data;
	cpas_register_param.cam_cpas_client_cb = cam_fd_hw_util_cpas_callback;
	rc = cam_cpas_register_client(&cpas_register_param);
	if (rc) {
		CAM_ERR(CAM_FD, "CPAS registration failed");
		goto free_soc_private;
	}
	soc_private->cpas_handle = cpas_register_param.client_handle;
	CAM_DBG(CAM_FD, "CPAS handle=%d", soc_private->cpas_handle);

	return rc;

free_soc_private:
	kfree(soc_info->soc_private);
	soc_info->soc_private = NULL;
release_res:
	cam_soc_util_release_platform_resource(soc_info);

	return rc;
}
int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
{
struct cam_fd_soc_private *soc_private =
(struct cam_fd_soc_private *)soc_info->soc_private;
int rc;
rc = cam_cpas_unregister_client(soc_private->cpas_handle);
if (rc)
CAM_ERR(CAM_FD, "Unregister cpas failed, handle=%d, rc=%d",
soc_private->cpas_handle, rc);
rc = cam_soc_util_release_platform_resource(soc_info);
if (rc)
CAM_ERR(CAM_FD, "release platform failed, rc=%d", rc);
kfree(soc_info->soc_private);
soc_info->soc_private = NULL;
return rc;
}

View file

@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_FD_HW_SOC_H_
#define _CAM_FD_HW_SOC_H_
#include "cam_soc_util.h"
/**
 * enum cam_fd_reg_base - Enum for FD register sets
 *
 * @CAM_FD_REG_CORE    : Indicates FD Core register space
 * @CAM_FD_REG_WRAPPER : Indicates FD Wrapper register space
 * @CAM_FD_REG_MAX     : Max number of register sets supported
 */
enum cam_fd_reg_base {
	CAM_FD_REG_CORE,
	CAM_FD_REG_WRAPPER,
	CAM_FD_REG_MAX
};
/**
 * struct cam_fd_soc_private : FD private SOC information
 *
 * @regbase_index : Mapping between Register base enum to register index in SOC
 * @cpas_handle   : CPAS handle
 *
 */
struct cam_fd_soc_private {
	int32_t regbase_index[CAM_FD_REG_MAX];
	uint32_t cpas_handle;
};

/* One-time SOC setup: DT parse, platform resources, CPAS registration */
int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t irq_handler, void *private_data);

/* Tear down everything acquired by cam_fd_soc_init_resources() */
int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info);

/* Power up: CPAS vote + platform resource enable */
int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info);

/* Power down: platform resource disable + CPAS unvote */
int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info);

/* Barrier-ordered register read from the given FD register space */
uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info,
	enum cam_fd_reg_base reg_base, uint32_t reg_offset);

/* Barrier-ordered register write to the given FD register space */
void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info,
	enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value);
#endif /* _CAM_FD_HW_SOC_H_ */

View file

@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_FD_HW_V41_H_
#define _CAM_FD_HW_V41_H_
/*
 * Static hardware description for FD core v4.1.0 inside wrapper v1.2.0:
 * register offsets, result-buffer geometry, errata workarounds and the
 * IRQ/QoS configuration used when programming this revision.
 */
static struct cam_fd_hw_static_info cam_fd_wrapper120_core410_info = {
	.core_version = {
		.major  = 4,
		.minor  = 1,
		.incr   = 0,
	},
	.wrapper_version = {
		.major  = 1,
		.minor  = 2,
		.incr   = 0,
	},
	/* FD core register offsets (relative to the core register base) */
	.core_regs = {
		.version               = 0x38,
		.control               = 0x0,
		.result_cnt            = 0x4,
		.result_addr           = 0x20,
		.image_addr            = 0x24,
		.work_addr             = 0x28,
		.ro_mode               = 0x34,
		.results_reg_base      = 0x400,
		.raw_results_reg_base  = 0x800,
	},
	/* Wrapper register offsets (relative to the wrapper register base) */
	.wrapper_regs = {
		.wrapper_version       = 0x0,
		.cgc_disable           = 0x4,
		.hw_stop               = 0x8,
		.sw_reset              = 0x10,
		.vbif_req_priority     = 0x20,
		.vbif_priority_level   = 0x24,
		.vbif_done_status      = 0x34,
		.irq_mask              = 0x50,
		.irq_status            = 0x54,
		.irq_clear             = 0x58,
	},
	.results = {
		.max_faces             = 35,
		.per_face_entries      = 4,
		.raw_results_available = true,
		.raw_results_entries   = 512,
	},
	/* Hardware errata workarounds required on this revision */
	.enable_errata_wa = {
		.single_irq_only         = true,
		.ro_mode_enable_always   = true,
		.ro_mode_results_invalid = true,
	},
	.irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) |
		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) |
		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE),
	.qos_priority       = 4,
	.qos_priority_level = 4,
	.supported_modes    = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID,
	.ro_mode_supported  = true,
};
#endif /* _CAM_FD_HW_V41_H_ */

View file

@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_FD_HW_V501_H_
#define _CAM_FD_HW_V501_H_
/*
 * Static hardware description for FD core v5.0.1 inside wrapper v2.0.0.
 * Register layout and errata match the v4.1 part; only the version
 * numbers differ.
 */
static struct cam_fd_hw_static_info cam_fd_wrapper200_core501_info = {
	.core_version = {
		.major  = 5,
		.minor  = 0,
		.incr   = 1,
	},
	.wrapper_version = {
		.major  = 2,
		.minor  = 0,
		.incr   = 0,
	},
	/* FD core register offsets (relative to the core register base) */
	.core_regs = {
		.version               = 0x38,
		.control               = 0x0,
		.result_cnt            = 0x4,
		.result_addr           = 0x20,
		.image_addr            = 0x24,
		.work_addr             = 0x28,
		.ro_mode               = 0x34,
		.results_reg_base      = 0x400,
		.raw_results_reg_base  = 0x800,
	},
	/* Wrapper register offsets (relative to the wrapper register base) */
	.wrapper_regs = {
		.wrapper_version       = 0x0,
		.cgc_disable           = 0x4,
		.hw_stop               = 0x8,
		.sw_reset              = 0x10,
		.vbif_req_priority     = 0x20,
		.vbif_priority_level   = 0x24,
		.vbif_done_status      = 0x34,
		.irq_mask              = 0x50,
		.irq_status            = 0x54,
		.irq_clear             = 0x58,
	},
	.results = {
		.max_faces             = 35,
		.per_face_entries      = 4,
		.raw_results_available = true,
		.raw_results_entries   = 512,
	},
	/* Hardware errata workarounds required on this revision */
	.enable_errata_wa = {
		.single_irq_only         = true,
		.ro_mode_enable_always   = true,
		.ro_mode_results_invalid = true,
	},
	.irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) |
		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) |
		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE),
	.qos_priority       = 4,
	.qos_priority_level = 4,
	.supported_modes    = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID,
	.ro_mode_supported  = true,
};
#endif /* _CAM_FD_HW_V501_H_ */

View file

@ -0,0 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
# Header search paths for the camera stack modules the ICP driver uses
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
ccflags-y += -Idrivers/media/platform/msm/camera
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/

# ICP hardware sub-drivers plus the subdev, context and HFI core objects
obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_subdev.o cam_icp_context.o hfi.o

View file

@ -0,0 +1,249 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <media/cam_sync.h>
#include <media/cam_defs.h>
#include "cam_sync_api.h"
#include "cam_node.h"
#include "cam_context.h"
#include "cam_context_utils.h"
#include "cam_icp_context.h"
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
#include "cam_packet_util.h"
static const char icp_dev_name[] = "icp";
/*
 * cam_icp_context_dump_active_request() - page-fault dump hook for ICP.
 *
 * Installed as the context's pagefault_ops. Walks every request on the
 * context's active list and asks the hw mgr to dump page-fault info for
 * each request's packet, reporting any request whose buffers contain the
 * faulting @iova.
 *
 * Returns the rc of the last dump attempt.
 */
static int cam_icp_context_dump_active_request(void *data, unsigned long iova,
	uint32_t buf_info)
{
	struct cam_context *ctx = (struct cam_context *)data;
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req_temp = NULL;
	struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL;
	int rc = 0;
	bool b_mem_found = false;

	if (!ctx) {
		CAM_ERR(CAM_ICP, "Invalid ctx");
		return -EINVAL;
	}

	CAM_INFO(CAM_ICP, "iommu fault for icp ctx %d state %d",
		ctx->ctx_id, ctx->state);

	/* Safe iteration: dump_pf_info may complete/remove entries */
	list_for_each_entry_safe(req, req_temp,
		&ctx->active_req_list, list) {
		pf_dbg_entry = &(req->pf_data);
		CAM_INFO(CAM_ICP, "req_id : %lld", req->request_id);

		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
			iova, buf_info, &b_mem_found);
		if (rc)
			CAM_ERR(CAM_ICP, "Failed to dump pf info");

		if (b_mem_found)
			CAM_ERR(CAM_ICP, "Found page fault in req %lld %d",
				req->request_id, rc);
	}

	return rc;
}
/* Acquire the device in AVAILABLE state; moves the context to ACQUIRED. */
static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
	struct cam_acquire_dev_cmd *cmd)
{
	int rc = cam_context_acquire_dev_to_hw(ctx, cmd);

	if (rc)
		return rc;

	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ICP", ctx);

	return 0;
}
/*
 * Release the device in ACQUIRED state; the context goes back to AVAILABLE
 * even if the hw release reported an error.
 */
static int __cam_icp_release_dev_in_acquired(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc = cam_context_release_dev_to_hw(ctx, cmd);

	if (rc)
		CAM_ERR(CAM_ICP, "Unable to release device");

	ctx->state = CAM_CTX_AVAILABLE;
	trace_cam_context_state("ICP", ctx);

	return rc;
}
/* Start the device in ACQUIRED state; moves the context to READY. */
static int __cam_icp_start_dev_in_acquired(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = cam_context_start_dev_to_hw(ctx, cmd);

	if (rc)
		return rc;

	ctx->state = CAM_CTX_READY;
	trace_cam_context_state("ICP", ctx);

	return 0;
}
/* Flush pending requests on a READY (or ACQUIRED) context. */
static int __cam_icp_flush_dev_in_ready(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd)
{
	int rc = cam_context_flush_dev_to_hw(ctx, cmd);

	if (rc)
		CAM_ERR(CAM_ICP, "Failed to flush device");

	return rc;
}
/* Hand a config (packet) to the hw mgr for preparation/submission. */
static int __cam_icp_config_dev_in_ready(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
	int rc = cam_context_prepare_dev_to_hw(ctx, cmd);

	if (rc)
		CAM_ERR(CAM_ICP, "Failed to prepare device");

	return rc;
}
/*
 * Stop the device in READY state; the context drops back to ACQUIRED
 * regardless of the hw stop result.
 */
static int __cam_icp_stop_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd)
{
	int rc = cam_context_stop_dev_to_hw(ctx);

	if (rc)
		CAM_ERR(CAM_ICP, "Failed to stop device");

	ctx->state = CAM_CTX_ACQUIRED;
	trace_cam_context_state("ICP", ctx);

	return rc;
}
/*
 * Release the device from READY state: stop first, then release. Both
 * steps are attempted even if the first fails; the release rc wins.
 */
static int __cam_icp_release_dev_in_ready(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int rc;

	if (__cam_icp_stop_dev_in_ready(ctx, NULL))
		CAM_ERR(CAM_ICP, "Failed to stop device");

	rc = __cam_icp_release_dev_in_acquired(ctx, cmd);
	if (rc)
		CAM_ERR(CAM_ICP, "Failed to release device");

	return rc;
}
/* IRQ hook: forward buf-done events to the generic context layer. */
static int __cam_icp_handle_buf_done_in_ready(void *ctx,
	uint32_t evt_id, void *done)
{
	return cam_context_buf_done_from_hw(ctx, done, evt_id);
}
/*
 * ICP context state machine: the ioctl/crm/irq/pagefault handlers allowed
 * in each cam_context state. Indexed by enum cam_ctx_state.
 */
static struct cam_ctx_ops
	cam_icp_ctx_state_machine[CAM_CTX_STATE_MAX] = {
	/* Uninit */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Available */
	{
		.ioctl_ops = {
			.acquire_dev = __cam_icp_acquire_dev_in_available,
		},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* Acquired */
	{
		.ioctl_ops = {
			.release_dev = __cam_icp_release_dev_in_acquired,
			.start_dev = __cam_icp_start_dev_in_acquired,
			.config_dev = __cam_icp_config_dev_in_ready,
			.flush_dev = __cam_icp_flush_dev_in_ready,
		},
		.crm_ops = {},
		.irq_ops = __cam_icp_handle_buf_done_in_ready,
		.pagefault_ops = cam_icp_context_dump_active_request,
	},
	/* Ready */
	{
		.ioctl_ops = {
			.stop_dev = __cam_icp_stop_dev_in_ready,
			.release_dev = __cam_icp_release_dev_in_ready,
			.config_dev = __cam_icp_config_dev_in_ready,
			.flush_dev = __cam_icp_flush_dev_in_ready,
		},
		.crm_ops = {},
		.irq_ops = __cam_icp_handle_buf_done_in_ready,
		.pagefault_ops = cam_icp_context_dump_active_request,
	},
	/* Activated : ICP contexts do not use this state */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
		.pagefault_ops = cam_icp_context_dump_active_request,
	},
};
/*
 * cam_icp_context_init() - initialize one ICP context.
 *
 * @ctx->base must already point at the backing cam_context. Initializes
 * the base context with the ICP state machine and request pool.
 *
 * Returns 0 on success or a negative error code.
 */
int cam_icp_context_init(struct cam_icp_context *ctx,
	struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id)
{
	int rc;

	if ((!ctx) || (!ctx->base) || (!hw_intf)) {
		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", ctx, hw_intf);
		rc = -EINVAL;
		goto err;
	}

	rc = cam_context_init(ctx->base, icp_dev_name, CAM_ICP, ctx_id,
		NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
	if (rc) {
		CAM_ERR(CAM_ICP, "Camera Context Base init failed");
		goto err;
	}

	ctx->base->state_machine = cam_icp_ctx_state_machine;
	ctx->base->ctx_priv = ctx;
	/* Mapping is assigned by the hw mgr on acquire */
	ctx->ctxt_to_hw_map = NULL;

err:
	return rc;
}
/*
 * cam_icp_context_deinit() - tear down one ICP context.
 *
 * Deinitializes the backing cam_context and clears the whole wrapper.
 * Returns 0 on success, -EINVAL on bad input.
 */
int cam_icp_context_deinit(struct cam_icp_context *ctx)
{
	if (!ctx || !ctx->base) {
		CAM_ERR(CAM_ICP, "Invalid params: %pK", ctx);
		return -EINVAL;
	}

	cam_context_deinit(ctx->base);

	memset(ctx, 0, sizeof(*ctx));

	return 0;
}

View file

@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_ICP_CONTEXT_H_
#define _CAM_ICP_CONTEXT_H_
#include "cam_context.h"
/**
 * struct cam_icp_context - icp context
 * @base:           backing generic camera context (assigned by the owner
 *                  before cam_icp_context_init() is called)
 * @state_machine:  state machine for ICP context
 * @req_base:       pool of request slots handed to the base context
 * @state:          icp context state
 * @ctxt_to_hw_map: context to FW handle mapping (set by the hw mgr)
 */
struct cam_icp_context {
	struct cam_context *base;
	struct cam_ctx_ops *state_machine;
	struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
	uint32_t state;
	void *ctxt_to_hw_map;
};
/**
* cam_icp_context_init() - ICP context init
* @ctx: Pointer to context
* @hw_intf: Pointer to ICP hardware interface
* @ctx_id: ID for this context
*/
int cam_icp_context_init(struct cam_icp_context *ctx,
struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id);
/**
* cam_icp_context_deinit() - ICP context deinit
* @ctx: Pointer to context
*/
int cam_icp_context_deinit(struct cam_icp_context *ctx);
#endif /* _CAM_ICP_CONTEXT_H_ */

View file

@ -0,0 +1,275 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/iommu.h>
#include <linux/timer.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/cam_req_mgr.h>
#include <media/cam_defs.h>
#include <media/cam_icp.h>
#include "cam_req_mgr_dev.h"
#include "cam_subdev.h"
#include "cam_node.h"
#include "cam_context.h"
#include "cam_icp_context.h"
#include "cam_hw_mgr_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_debug_util.h"
#include "cam_smmu_api.h"
#define CAM_ICP_DEV_NAME "cam-icp"
/**
 * struct cam_icp_subdev - state for the (singleton) ICP v4l2 subdevice
 * @sd:       cam_subdev wrapper around the v4l2 subdev
 * @node:     cam_node backing this subdev
 * @ctx:      generic camera contexts
 * @ctx_icp:  ICP-specific wrappers, one per entry of @ctx
 * @icp_lock: serializes subdev open/close
 * @open_cnt: number of active opens (only one is allowed)
 * @reserved: unused
 */
struct cam_icp_subdev {
	struct cam_subdev sd;
	struct cam_node *node;
	struct cam_context ctx[CAM_ICP_CTX_MAX];
	struct cam_icp_context ctx_icp[CAM_ICP_CTX_MAX];
	struct mutex icp_lock;
	int32_t open_cnt;
	int32_t reserved;
};

/* Single global instance; the ICP subdev is a singleton */
static struct cam_icp_subdev g_icp_dev;

static const struct of_device_id cam_icp_dt_match[] = {
	{.compatible = "qcom,cam-icp"},
	{}
};
/*
 * SMMU page-fault callback for the ICP device: dump page-fault info for
 * every context on the node so the offending request can be identified.
 */
static void cam_icp_dev_iommu_fault_handler(
	struct iommu_domain *domain, struct device *dev, unsigned long iova,
	int flags, void *token, uint32_t buf_info)
{
	struct cam_node *node;
	int i;

	if (!token) {
		CAM_ERR(CAM_ICP, "invalid token in page handler cb");
		return;
	}

	node = (struct cam_node *)token;

	for (i = 0; i < node->ctx_size; i++)
		cam_context_dump_pf_info(&node->ctx_list[i], iova, buf_info);
}
/*
 * cam_icp_subdev_open() - first-open handler for the ICP v4l2 subdev.
 *
 * Only a single open is allowed; the first open triggers the ICP firmware
 * download through the hw mgr's hw_open. Serialized against close via
 * icp_lock.
 *
 * Returns 0 on success or a negative error code.
 */
static int cam_icp_subdev_open(struct v4l2_subdev *sd,
	struct v4l2_subdev_fh *fh)
{
	struct cam_hw_mgr_intf *hw_mgr_intf = NULL;
	struct cam_node *node = v4l2_get_subdevdata(sd);
	int rc = 0;

	mutex_lock(&g_icp_dev.icp_lock);
	/* Enforce single-open: the ICP firmware session is exclusive */
	if (g_icp_dev.open_cnt >= 1) {
		CAM_ERR(CAM_ICP, "ICP subdev is already opened");
		rc = -EALREADY;
		goto end;
	}

	if (!node) {
		CAM_ERR(CAM_ICP, "Invalid args");
		rc = -EINVAL;
		goto end;
	}

	hw_mgr_intf = &node->hw_mgr_intf;
	/* hw_open downloads and boots the ICP firmware */
	rc = hw_mgr_intf->hw_open(hw_mgr_intf->hw_mgr_priv, NULL);
	if (rc < 0) {
		CAM_ERR(CAM_ICP, "FW download failed");
		goto end;
	}
	g_icp_dev.open_cnt++;
end:
	mutex_unlock(&g_icp_dev.icp_lock);
	return rc;
}
/*
 * cam_icp_subdev_close() - last-close handler for the ICP v4l2 subdev.
 *
 * Shuts down the cam_node (powering off the ICP hardware) when the last
 * user closes the device node. Serialized against open via icp_lock.
 *
 * Returns 0 on success or a negative error code.
 */
static int cam_icp_subdev_close(struct v4l2_subdev *sd,
	struct v4l2_subdev_fh *fh)
{
	int rc = 0;
	struct cam_hw_mgr_intf *hw_mgr_intf = NULL;
	struct cam_node *node = v4l2_get_subdevdata(sd);

	mutex_lock(&g_icp_dev.icp_lock);
	if (g_icp_dev.open_cnt <= 0) {
		CAM_DBG(CAM_ICP, "ICP subdev is already closed");
		rc = -EINVAL;
		goto end;
	}
	g_icp_dev.open_cnt--;

	if (!node) {
		CAM_ERR(CAM_ICP, "Invalid args");
		rc = -EINVAL;
		goto end;
	}

	hw_mgr_intf = &node->hw_mgr_intf;
	if (!hw_mgr_intf) {
		CAM_ERR(CAM_ICP, "hw_mgr_intf is not initialized");
		rc = -EINVAL;
		goto end;
	}

	rc = cam_node_shutdown(node);
	if (rc < 0) {
		CAM_ERR(CAM_ICP, "HW close failed");
		goto end;
	}

end:
	mutex_unlock(&g_icp_dev.icp_lock);
	/* Fix: propagate rc; the original computed it but always returned 0 */
	return rc;
}
const struct v4l2_subdev_internal_ops cam_icp_subdev_internal_ops = {
.open = cam_icp_subdev_open,
.close = cam_icp_subdev_close,
};
static int cam_icp_probe(struct platform_device *pdev)
{
int rc = 0, i = 0;
struct cam_node *node;
struct cam_hw_mgr_intf *hw_mgr_intf;
int iommu_hdl = -1;
if (!pdev) {
CAM_ERR(CAM_ICP, "pdev is NULL");
return -EINVAL;
}
g_icp_dev.sd.pdev = pdev;
g_icp_dev.sd.internal_ops = &cam_icp_subdev_internal_ops;
rc = cam_subdev_probe(&g_icp_dev.sd, pdev, CAM_ICP_DEV_NAME,
CAM_ICP_DEVICE_TYPE);
if (rc) {
CAM_ERR(CAM_ICP, "ICP cam_subdev_probe failed");
goto probe_fail;
}
node = (struct cam_node *) g_icp_dev.sd.token;
hw_mgr_intf = kzalloc(sizeof(*hw_mgr_intf), GFP_KERNEL);
if (!hw_mgr_intf) {
rc = -EINVAL;
goto hw_alloc_fail;
}
rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf,
&iommu_hdl);
if (rc) {
CAM_ERR(CAM_ICP, "ICP HW manager init failed: %d", rc);
goto hw_init_fail;
}
for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
g_icp_dev.ctx_icp[i].base = &g_icp_dev.ctx[i];
rc = cam_icp_context_init(&g_icp_dev.ctx_icp[i],
hw_mgr_intf, i);
if (rc) {
CAM_ERR(CAM_ICP, "ICP context init failed");
goto ctx_fail;
}
}
rc = cam_node_init(node, hw_mgr_intf, g_icp_dev.ctx,
CAM_ICP_CTX_MAX, CAM_ICP_DEV_NAME);
if (rc) {
CAM_ERR(CAM_ICP, "ICP node init failed");
goto ctx_fail;
}
cam_smmu_set_client_page_fault_handler(iommu_hdl,
cam_icp_dev_iommu_fault_handler, node);
g_icp_dev.open_cnt = 0;
mutex_init(&g_icp_dev.icp_lock);
CAM_DBG(CAM_ICP, "ICP probe complete");
return rc;
ctx_fail:
for (--i; i >= 0; i--)
cam_icp_context_deinit(&g_icp_dev.ctx_icp[i]);
hw_init_fail:
kfree(hw_mgr_intf);
hw_alloc_fail:
cam_subdev_remove(&g_icp_dev.sd);
probe_fail:
return rc;
}
/*
 * cam_icp_remove() - platform remove for the ICP subdevice.
 *
 * Deinitializes all ICP contexts, the cam_node and the subdev.
 *
 * NOTE(review): as written, probe never assigns g_icp_dev.node, so
 * cam_node_deinit() receives NULL here — confirm cam_node_deinit() is
 * NULL-safe or that probe sets the field.
 *
 * Returns 0 on success or a negative error code.
 */
static int cam_icp_remove(struct platform_device *pdev)
{
	int i;
	struct v4l2_subdev *sd;
	struct cam_subdev *subdev;

	if (!pdev) {
		CAM_ERR(CAM_ICP, "pdev is NULL");
		return -ENODEV;
	}

	sd = platform_get_drvdata(pdev);
	if (!sd) {
		CAM_ERR(CAM_ICP, "V4l2 subdev is NULL");
		return -ENODEV;
	}

	subdev = v4l2_get_subdevdata(sd);
	if (!subdev) {
		CAM_ERR(CAM_ICP, "cam subdev is NULL");
		return -ENODEV;
	}

	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
		cam_icp_context_deinit(&g_icp_dev.ctx_icp[i]);
	cam_node_deinit(g_icp_dev.node);
	cam_subdev_remove(&g_icp_dev.sd);
	mutex_destroy(&g_icp_dev.icp_lock);

	return 0;
}
/* Platform driver glue; matched against the "qcom,cam-icp" DT node */
static struct platform_driver cam_icp_driver = {
	.probe = cam_icp_probe,
	.remove = cam_icp_remove,
	.driver = {
		.name = "cam_icp",
		.owner = THIS_MODULE,
		.of_match_table = cam_icp_dt_match,
		.suppress_bind_attrs = true,
	},
};

static int __init cam_icp_init_module(void)
{
	return platform_driver_register(&cam_icp_driver);
}

static void __exit cam_icp_exit_module(void)
{
	platform_driver_unregister(&cam_icp_driver);
}

module_init(cam_icp_init_module);
module_exit(cam_icp_exit_module);
MODULE_DESCRIPTION("MSM ICP driver");
MODULE_LICENSE("GPL v2");

View file

@ -0,0 +1,157 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _HFI_INTF_H_
#define _HFI_INTF_H_
#include <linux/types.h>
/**
* struct hfi_mem
* @len: length of memory
* @kva: kernel virtual address
* @iova: IO virtual address
* @reserved: reserved field
*/
struct hfi_mem {
uint64_t len;
uintptr_t kva;
uint32_t iova;
uint32_t reserved;
};
/**
 * struct hfi_mem_info
 * @qtbl: qtable hfi memory
 * @cmd_q: command queue hfi memory for host to firmware communication
 * @msg_q: message queue hfi memory for firmware to host communication
 * @dbg_q: debug queue hfi memory for firmware debug information
 * @sfr_buf: buffer for subsystem failure reason[SFR]
 * @sec_heap: secondary heap hfi memory for firmware
 * @shmem: shared memory region for firmware (presumably the region
 *         advertised via HFI_REG_SHARED_MEM_PTR — TODO confirm)
 * @qdss: qdss mapped memory for fw
 * @icp_base: icp base address
 */
struct hfi_mem_info {
	struct hfi_mem qtbl;
	struct hfi_mem cmd_q;
	struct hfi_mem msg_q;
	struct hfi_mem dbg_q;
	struct hfi_mem sfr_buf;
	struct hfi_mem sec_heap;
	struct hfi_mem shmem;
	struct hfi_mem qdss;
	void __iomem *icp_base;
};
/**
* hfi_write_cmd() - function for hfi write
* @cmd_ptr: pointer to command data for hfi write
*
* Returns success(zero)/failure(non zero)
*/
int hfi_write_cmd(void *cmd_ptr);
/**
* hfi_read_message() - function for hfi read
* @pmsg: buffer to place read message for hfi queue
* @q_id: queue id
* @words_read: total number of words read from the queue
* returned as output to the caller
*
* Returns success(zero)/failure(non zero)
*/
int hfi_read_message(uint32_t *pmsg, uint8_t q_id, uint32_t *words_read);
/**
* hfi_init() - function initialize hfi after firmware download
* @event_driven_mode: event mode
* @hfi_mem: hfi memory info
* @icp_base: icp base address
* @debug: debug flag
*
* Returns success(zero)/failure(non zero)
*/
int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
void *__iomem icp_base, bool debug);
/**
* hfi_get_hw_caps() - hardware capabilities from firmware
* @query_caps: holds query information from hfi
*
* Returns success(zero)/failure(non zero)
*/
int hfi_get_hw_caps(void *query_caps);
/**
* hfi_send_system_cmd() - send hfi system command to firmware
* @type: type of system command
* @data: command data
* @size: size of command data
*/
void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size);
/**
* cam_hfi_enable_cpu() - enable A5 CPU
* @icp_base: icp base address
*/
void cam_hfi_enable_cpu(void __iomem *icp_base);
/**
* cam_hfi_disable_cpu() - disable A5 CPU
* @icp_base: icp base address
*/
void cam_hfi_disable_cpu(void __iomem *icp_base);
/**
* cam_hfi_deinit() - cleanup HFI
*/
void cam_hfi_deinit(void __iomem *icp_base);
/**
* hfi_set_debug_level() - set debug level
* @a5_dbg_type: 1 for debug_q & 2 for qdss
* @lvl: FW debug message level
*/
int hfi_set_debug_level(u64 a5_dbg_type, uint32_t lvl);
/**
* hfi_set_fw_dump_level() - set firmware dump level
* @lvl: level of firmware dump level
*/
int hfi_set_fw_dump_level(uint32_t lvl);
/**
* hfi_enable_ipe_bps_pc() - Enable interframe pc
* Host sends a command to firmware to enable interframe
* power collapse for IPE and BPS hardware.
*
* @enable: flag to enable/disable
* @core_info: Core information to firmware
*/
int hfi_enable_ipe_bps_pc(bool enable, uint32_t core_info);
/**
* hfi_cmd_ubwc_config() - UBWC configuration to firmware
* @ubwc_cfg: UBWC configuration parameters
*/
int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg);
/**
* cam_hfi_resume() - function to resume
* @hfi_mem: hfi memory info
* @icp_base: icp base address
* @debug: debug flag
*
* Returns success(zero)/failure(non zero)
*/
int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
void __iomem *icp_base, bool debug);
/**
* cam_hfi_queue_dump() - utility function to dump hfi queues
*/
void cam_hfi_queue_dump(void);
#endif /* _HFI_INTF_H_ */

View file

@ -0,0 +1,333 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_HFI_REG_H_
#define _CAM_HFI_REG_H_
#include <linux/types.h>
#include "hfi_intf.h"
/* start of ICP CSR registers */
#define HFI_REG_A5_HW_VERSION 0x0
#define HFI_REG_A5_CSR_NSEC_RESET 0x4
#define HFI_REG_A5_CSR_A5_CONTROL 0x8
#define HFI_REG_A5_CSR_ETM 0xC
#define HFI_REG_A5_CSR_A2HOSTINTEN 0x10
#define HFI_REG_A5_CSR_A2HOSTINT 0x14
#define HFI_REG_A5_CSR_A2HOSTINTCLR 0x18
#define HFI_REG_A5_CSR_A2HOSTINTSTATUS 0x1C
#define HFI_REG_A5_CSR_A2HOSTINTSET 0x20
#define HFI_REG_A5_CSR_HOST2ICPINT 0x30
#define HFI_REG_A5_CSR_A5_STATUS 0x200
#define HFI_REG_A5_QGIC2_LM_ID 0x204
#define HFI_REG_A5_SPARE 0x400
/* general purpose registers from */
#define HFI_REG_FW_VERSION 0x44
#define HFI_REG_HOST_ICP_INIT_REQUEST 0x48
#define HFI_REG_ICP_HOST_INIT_RESPONSE 0x4C
#define HFI_REG_SHARED_MEM_PTR 0x50
#define HFI_REG_SHARED_MEM_SIZE 0x54
#define HFI_REG_QTBL_PTR 0x58
#define HFI_REG_UNCACHED_HEAP_PTR 0x5C
#define HFI_REG_UNCACHED_HEAP_SIZE 0x60
#define HFI_REG_QDSS_IOVA 0x6C
#define HFI_REG_QDSS_IOVA_SIZE 0x70
#define HFI_REG_SFR_PTR 0x68
/* end of ICP CSR registers */
/* flags for ICP CSR registers */
#define ICP_FLAG_CSR_WAKE_UP_EN (1 << 4)
#define ICP_FLAG_CSR_A5_EN (1 << 9)
#define ICP_CSR_EN_CLKGATE_WFI (1 << 12)
#define ICP_CSR_EDBGRQ (1 << 14)
#define ICP_CSR_DBGSWENABLE (1 << 22)
#define ICP_CSR_A5_STATUS_WFI (1 << 7)
#define ICP_FLAG_A5_CTRL_DBG_EN (ICP_FLAG_CSR_WAKE_UP_EN|\
ICP_FLAG_CSR_A5_EN|\
ICP_CSR_EDBGRQ|\
ICP_CSR_DBGSWENABLE)
#define ICP_FLAG_A5_CTRL_EN (ICP_FLAG_CSR_WAKE_UP_EN|\
ICP_FLAG_CSR_A5_EN|\
ICP_CSR_EN_CLKGATE_WFI)
/* start of Queue table and queues */
#define MAX_ICP_HFI_QUEUES 4
#define ICP_QHDR_TX_TYPE_MASK 0xFF000000
#define ICP_QHDR_RX_TYPE_MASK 0x00FF0000
#define ICP_QHDR_PRI_TYPE_MASK 0x0000FF00
#define ICP_QHDR_Q_ID_MASK 0x000000FF
#define ICP_CMD_Q_SIZE_IN_BYTES 4096
#define ICP_MSG_Q_SIZE_IN_BYTES 4096
#define ICP_DBG_Q_SIZE_IN_BYTES 102400
#define ICP_MSG_SFR_SIZE_IN_BYTES 4096
#define ICP_SHARED_MEM_IN_BYTES (1024 * 1024)
#define ICP_UNCACHED_HEAP_SIZE_IN_BYTES (2 * 1024 * 1024)
#define ICP_HFI_MAX_PKT_SIZE_IN_WORDS 25600
#define ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS 256
#define ICP_HFI_QTBL_HOSTID1 0x01000000
#define ICP_HFI_QTBL_STATUS_ENABLED 0x00000001
#define ICP_HFI_NUMBER_OF_QS 3
#define ICP_HFI_NUMBER_OF_ACTIVE_QS 3
#define ICP_HFI_QTBL_OFFSET 0
#define ICP_HFI_VAR_SIZE_PKT 0
#define ICP_HFI_MAX_MSG_SIZE_IN_WORDS 128
/* Queue Header type masks. Use these to access bitfields in qhdr_type */
#define HFI_MASK_QHDR_TX_TYPE 0xFF000000
#define HFI_MASK_QHDR_RX_TYPE 0x00FF0000
#define HFI_MASK_QHDR_PRI_TYPE 0x0000FF00
#define HFI_MASK_QHDR_Q_ID_TYPE 0x000000FF
#define TX_EVENT_DRIVEN_MODE_1 0
#define RX_EVENT_DRIVEN_MODE_1 0
#define TX_EVENT_DRIVEN_MODE_2 0x01000000
#define RX_EVENT_DRIVEN_MODE_2 0x00010000
#define TX_EVENT_POLL_MODE_2 0x02000000
#define RX_EVENT_POLL_MODE_2 0x00020000
#define U32_OFFSET 0x1
#define BYTE_WORD_SHIFT 2
/**
 * enum hfi_state - HFI initialization states
 * @HFI_DEINIT: HFI is not initialized yet
 * @HFI_INIT: HFI is initialized
 * @HFI_READY: HFI is ready to send/receive commands/messages
 */
enum hfi_state {
	HFI_DEINIT,
	HFI_INIT,
	HFI_READY
};
/**
* @RESET: init success
* @SET: init failed
*/
enum reg_settings {
RESET,
SET,
SET_WM = 1024
};
/**
* @INTR_DISABLE: Disable interrupt
* @INTR_ENABLE: Enable interrupt
* @INTR_ENABLE_WD0: Enable WD0
* @INTR_ENABLE_WD1: Enable WD1
*/
enum intr_status {
INTR_DISABLE,
INTR_ENABLE,
INTR_ENABLE_WD0,
INTR_ENABLE_WD1 = 0x4
};
/**
* @ICP_INIT_RESP_RESET: reset init state
* @ICP_INIT_RESP_SUCCESS: init success
* @ICP_INIT_RESP_FAILED: init failed
*/
enum host_init_resp {
ICP_INIT_RESP_RESET,
ICP_INIT_RESP_SUCCESS,
ICP_INIT_RESP_FAILED
};
/**
* @ICP_INIT_REQUEST_RESET: reset init request
* @ICP_INIT_REQUEST_SET: set init request
*/
enum host_init_request {
ICP_INIT_REQUEST_RESET,
ICP_INIT_REQUEST_SET
};
/**
* @QHDR_INACTIVE: Queue is inactive
* @QHDR_ACTIVE: Queue is active
*/
enum qhdr_status {
QHDR_INACTIVE,
QHDR_ACTIVE
};
/**
* @INTR_MODE: event driven mode 1, each send and receive generates interrupt
* @WM_MODE: event driven mode 2, interrupts based on watermark mechanism
* @POLL_MODE: poll method
*/
enum qhdr_event_drv_type {
INTR_MODE,
WM_MODE,
POLL_MODE
};
/**
* @TX_INT: event driven mode 1, each send and receive generates interrupt
* @TX_INT_WM: event driven mode 2, interrupts based on watermark mechanism
* @TX_POLL: poll method
* @ICP_QHDR_TX_TYPE_MASK defines position in qhdr_type
*/
enum qhdr_tx_type {
TX_INT,
TX_INT_WM,
TX_POLL
};
/**
* @RX_INT: event driven mode 1, each send and receive generates interrupt
* @RX_INT_WM: event driven mode 2, interrupts based on watermark mechanism
* @RX_POLL: poll method
* @ICP_QHDR_RX_TYPE_MASK defines position in qhdr_type
*/
enum qhdr_rx_type {
RX_INT,
RX_INT_WM,
RX_POLL
};
/**
 * @Q_CMD: Host to FW command queue
 * @Q_MSG: FW to Host message queue
 * @Q_DBG: FW to Host debug queue
 * @ICP_QHDR_Q_ID_MASK defines position in qhdr_type
 */
enum qhdr_q_id {
	Q_CMD,
	Q_MSG,
	Q_DBG
};
/**
 * struct hfi_qtbl_hdr - header of the shared-memory queue table
 * @qtbl_version: Queue table version number
 * Higher 16 bits: Major version
 * Lower 16 bits: Minor version
 * @qtbl_size: Queue table size from version to last parameter in qhdr entry
 * @qtbl_qhdr0_offset: Offset to the start of first qhdr
 * @qtbl_qhdr_size: Queue header size in bytes
 * @qtbl_num_q: Total number of queues in Queue table
 * @qtbl_num_active_q: Total number of active queues
 *
 * Layout is shared with firmware, hence __packed.
 */
struct hfi_qtbl_hdr {
uint32_t qtbl_version;
uint32_t qtbl_size;
uint32_t qtbl_qhdr0_offset;
uint32_t qtbl_qhdr_size;
uint32_t qtbl_num_q;
uint32_t qtbl_num_active_q;
} __packed;
/**
 * struct hfi_q_hdr - per-queue header in the shared queue table
 * @qhdr_status: Queue status, enum qhdr_status defines possible states
 * @qhdr_start_addr: Queue start address in non cached memory
 * @qhdr_type: qhdr_tx, qhdr_rx, qhdr_q_id and priority defines qhdr type
 * @qhdr_q_size: Queue size
 * Number of queue packets if qhdr_pkt_size is non-zero
 * Queue size in bytes if qhdr_pkt_size is zero
 * @qhdr_pkt_size: Size of queue packet entries
 * 0x0: variable queue packet size
 * non zero: size of queue packet entry, fixed
 * @qhdr_pkt_drop_cnt: Number of packets dropped by sender
 * @qhdr_rx_wm: Receiver watermark, applicable in event driven mode
 * @qhdr_tx_wm: Sender watermark, applicable in event driven mode
 * @qhdr_rx_req: Receiver sets this bit if queue is empty
 * @qhdr_tx_req: Sender sets this bit if queue is full
 * @qhdr_rx_irq_status: Receiver sets this bit and triggers an interrupt to
 * the sender after packets are dequeued. Sender clears this bit
 * @qhdr_tx_irq_status: Sender sets this bit and triggers an interrupt to
 * the receiver after packets are queued. Receiver clears this bit
 * @qhdr_read_idx: Read index
 * @qhdr_write_idx: Write index
 *
 * Each real field is preceded by a dummy[15] pad, so consecutive fields
 * sit exactly 64 bytes (16 words) apart; presumably this keeps each
 * host/FW-shared field on its own cache line -- TODO confirm against
 * the firmware interface spec before changing.
 */
struct hfi_q_hdr {
uint32_t dummy[15];
uint32_t qhdr_status;
uint32_t dummy1[15];
uint32_t qhdr_start_addr;
uint32_t dummy2[15];
uint32_t qhdr_type;
uint32_t dummy3[15];
uint32_t qhdr_q_size;
uint32_t dummy4[15];
uint32_t qhdr_pkt_size;
uint32_t dummy5[15];
uint32_t qhdr_pkt_drop_cnt;
uint32_t dummy6[15];
uint32_t qhdr_rx_wm;
uint32_t dummy7[15];
uint32_t qhdr_tx_wm;
uint32_t dummy8[15];
uint32_t qhdr_rx_req;
uint32_t dummy9[15];
uint32_t qhdr_tx_req;
uint32_t dummy10[15];
uint32_t qhdr_rx_irq_status;
uint32_t dummy11[15];
uint32_t qhdr_tx_irq_status;
uint32_t dummy12[15];
uint32_t qhdr_read_idx;
uint32_t dummy13[15];
uint32_t qhdr_write_idx;
uint32_t dummy14[15];
};
/**
 * struct sfr_buf - subsystem failure reason buffer
 * @size: Number of characters
 * @msg : Subsystem failure reason
 */
struct sfr_buf {
uint32_t size;
char msg[ICP_MSG_SFR_SIZE_IN_BYTES];
};
/**
 * struct hfi_qtbl - complete queue table kept in shared memory
 * @q_tbl_hdr: Queue table header
 * @q_hdr: Queue header info, it holds info of cmd, msg and debug queues
 */
struct hfi_qtbl {
struct hfi_qtbl_hdr q_tbl_hdr;
struct hfi_q_hdr q_hdr[MAX_ICP_HFI_QUEUES];
};
/**
 * struct hfi_info - host-side bookkeeping for the HFI interface
 * @map: Hfi shared memory info
 * @smem_size: Shared memory size
 * @uncachedheap_size: uncached heap size
 * @msgpacket_buf: message buffer
 * @hfi_state: State machine for hfi
 * @cmd_q_lock: Lock for command queue
 * @cmd_q_state: State of command queue
 * @msg_q_lock: Lock for message queue
 * @msg_q_state: State of message queue
 * @csr_base: CSR base address
 */
struct hfi_info {
struct hfi_mem_info map;
uint32_t smem_size;
uint32_t uncachedheap_size;
uint32_t msgpacket_buf[ICP_HFI_MAX_MSG_SIZE_IN_WORDS];
uint8_t hfi_state;
struct mutex cmd_q_lock;
bool cmd_q_state;
struct mutex msg_q_lock;
bool msg_q_state;
void __iomem *csr_base;
};
#endif /* _CAM_HFI_REG_H_ */

View file

@ -0,0 +1,523 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _CAM_HFI_SESSION_DEFS_H
#define _CAM_HFI_SESSION_DEFS_H
#include <linux/types.h>
#define HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO 0x1
#define HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS 0x2
#define HFI_IPEBPS_CMD_OPCODE_BPS_ABORT 0x3
#define HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY 0x4
#define HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO 0x5
#define HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS 0x6
#define HFI_IPEBPS_CMD_OPCODE_IPE_ABORT 0x7
#define HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY 0x8
#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_IPE 0x9
#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_BPS 0xa
#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_BPS 0xb
#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_IPE 0xc
#define HFI_IPEBPS_HANDLE_TYPE_BPS 0x1
#define HFI_IPEBPS_HANDLE_TYPE_IPE_RT 0x2
#define HFI_IPEBPS_HANDLE_TYPE_IPE_NON_RT 0x3
/**
 * struct abort_data - list of request ids to abort
 * @num_req_ids: Number of req ids
 * @num_req_id: point to specific req id
 * (sized [1]: trailing entries follow in the same allocation --
 * pre-C99 flexible-array idiom, length given by num_req_ids)
 *
 * create abort data
 */
struct abort_data {
uint32_t num_req_ids;
uint32_t num_req_id[1];
};
/**
 * struct hfi_cmd_abort - session abort command payload
 * @abort: abort data
 * @user_data: user supplied argument
 *
 * create session abort data
 */
struct hfi_cmd_abort {
struct abort_data abort;
uint64_t user_data;
} __packed;
/**
 * struct hfi_cmd_abort_destroy - destroy/abort command payload
 * @user_data: user supplied data
 *
 * IPE/BPS destroy/abort command
 * @HFI_IPEBPS_CMD_OPCODE_IPE_ABORT
 * @HFI_IPEBPS_CMD_OPCODE_BPS_ABORT
 * @HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY
 * @HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY
 */
struct hfi_cmd_abort_destroy {
uint64_t user_data;
} __packed;
/**
 * struct hfi_cmd_chaining_ops - cross-session wait command payload
 * @wait_hdl: current session handle waits on wait_hdl to complete operation
 * @user_data: user supplied argument
 *
 * this structure for chaining opcodes
 * BPS_WAITS_FOR_IPE
 * BPS_WAITS_FOR_BPS
 * IPE_WAITS_FOR_BPS
 * IPE_WAITS_FOR_IPE
 */
struct hfi_cmd_chaining_ops {
uint32_t wait_hdl;
uint64_t user_data;
} __packed;
/**
 * struct hfi_cmd_create_handle - create FW session handle command
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @handle_type: IPE/BPS firmware session handle type
 * @user_data1: caller provided data1
 * @user_data2: caller provided data2
 *
 * create firmware session handle
 */
struct hfi_cmd_create_handle {
uint32_t size;
uint32_t pkt_type;
uint32_t handle_type;
uint64_t user_data1;
uint64_t user_data2;
} __packed;
/**
 * struct hfi_cmd_ipebps_async - asynchronous IPE/BPS command packet
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @opcode: opcode for IPE/BPS async operation
 * CONFIG_IO: configures I/O for IPE/BPS handle
 * FRAME_PROCESS: image frame to be processed by IPE/BPS
 * ABORT: abort all processing frames of IPE/BPS handle
 * DESTROY: destroy earlier created IPE/BPS handle
 * BPS_WAITS_FOR_IPE: sync for BPS to wait for IPE
 * BPS_WAITS_FOR_BPS: sync for BPS to wait for BPS
 * IPE_WAITS_FOR_IPE: sync for IPE to wait for IPE
 * IPE_WAITS_FOR_BPS: sync for IPE to wait for BPS
 * @user_data1: caller provided data1, returned in the ack
 * @user_data2: caller provided data2, returned in the ack
 * @num_fw_handles: number of IPE/BPS firmware handles in fw_handles array
 * @fw_handles: IPE/BPS handles array
 * (sized [1]: trailing handles follow, length is num_fw_handles)
 * @payload: command payload for IPE/BPS opcodes
 * @direct: points to actual payload
 * @indirect: points to address of payload
 *
 * sends async command to the earlier created IPE or BPS handle
 * for asynchronous operation.
 */
struct hfi_cmd_ipebps_async {
uint32_t size;
uint32_t pkt_type;
uint32_t opcode;
uint64_t user_data1;
uint64_t user_data2;
uint32_t num_fw_handles;
uint32_t fw_handles[1];
union {
uint32_t direct[1];
uint32_t indirect;
} payload;
} __packed;
/**
 * struct hfi_msg_create_handle_ack - ack for create-handle command
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @err_type: error code
 * @fw_handle: output param for IPE/BPS handle
 * @user_data1: user provided data1
 * @user_data2: user provided data2
 *
 * ack for create handle command of IPE/BPS
 * @HFI_MSG_IPEBPS_CREATE_HANDLE_ACK
 */
struct hfi_msg_create_handle_ack {
uint32_t size;
uint32_t pkt_type;
uint32_t err_type;
uint32_t fw_handle;
uint64_t user_data1;
uint64_t user_data2;
} __packed;
/**
 * struct hfi_msg_ipebps_async_ack - ack for an async IPE/BPS command
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @opcode: opcode of IPE/BPS async operation
 * @user_data1: user provided data1
 * @user_data2: user provided data2
 * @err_type: error code
 * @msg_data: IPE/BPS async done message data
 * (sized [1]: variable-length trailing data)
 *
 * result of IPE/BPS async command
 * @HFI_MSG_IPEBPS_ASYNC_COMMAND_ACK
 */
struct hfi_msg_ipebps_async_ack {
uint32_t size;
uint32_t pkt_type;
uint32_t opcode;
uint64_t user_data1;
uint64_t user_data2;
uint32_t err_type;
uint32_t msg_data[1];
} __packed;
/**
 * struct hfi_msg_frame_process_done - frame-process completion data
 * @result: result of frame process command
 * @scratch_buffer_address: address of scratch buffer
 */
struct hfi_msg_frame_process_done {
uint32_t result;
uint32_t scratch_buffer_address;
};
/**
 * struct hfi_msg_chaining_op - chaining (wait) completion message
 * @status: return status
 * @user_data: user data provided as part of chaining ops
 *
 * IPE/BPS wait response
 */
struct hfi_msg_chaining_op {
uint32_t status;
uint64_t user_data;
} __packed;
/**
 * struct hfi_msg_abort_destroy - abort/destroy completion message
 * @status: return status
 * @user_data: user data provided as part of abort/destroy ops
 *
 * IPE/BPS abort/destroy response
 */
struct hfi_msg_abort_destroy {
uint32_t status;
uint64_t user_data;
} __packed;
/* Max planes carried per image buffer (e.g. luma + chroma). */
#define MAX_NUM_OF_IMAGE_PLANES 2
/* Max frame sets batched in one HFR frame-process command. */
#define MAX_HFR_GROUP 16
/*
 * enum hfi_ipe_io_images - indices of IPE input/output image ports.
 * FIRST/LAST aliases bracket the input and output ranges for iteration.
 */
enum hfi_ipe_io_images {
IPE_INPUT_IMAGE_FULL,
IPE_INPUT_IMAGE_DS4,
IPE_INPUT_IMAGE_DS16,
IPE_INPUT_IMAGE_DS64,
IPE_INPUT_IMAGE_FULL_REF,
IPE_INPUT_IMAGE_DS4_REF,
IPE_INPUT_IMAGE_DS16_REF,
IPE_INPUT_IMAGE_DS64_REF,
IPE_OUTPUT_IMAGE_DISPLAY,
IPE_OUTPUT_IMAGE_VIDEO,
IPE_OUTPUT_IMAGE_FULL_REF,
IPE_OUTPUT_IMAGE_DS4_REF,
IPE_OUTPUT_IMAGE_DS16_REF,
IPE_OUTPUT_IMAGE_DS64_REF,
IPE_INPUT_IMAGE_FIRST = IPE_INPUT_IMAGE_FULL,
IPE_INPUT_IMAGE_LAST = IPE_INPUT_IMAGE_DS64_REF,
IPE_OUTPUT_IMAGE_FIRST = IPE_OUTPUT_IMAGE_DISPLAY,
IPE_OUTPUT_IMAGE_LAST = IPE_OUTPUT_IMAGE_DS64_REF,
IPE_IO_IMAGES_MAX
};
/* enum bps_io_images - indices of BPS input/output image ports. */
enum bps_io_images {
BPS_INPUT_IMAGE,
BPS_OUTPUT_IMAGE_FULL,
BPS_OUTPUT_IMAGE_DS4,
BPS_OUTPUT_IMAGE_DS16,
BPS_OUTPUT_IMAGE_DS64,
BPS_OUTPUT_IMAGE_STATS_BG,
BPS_OUTPUT_IMAGE_STATS_BHIST,
BPS_OUTPUT_IMAGE_REG1,
BPS_OUTPUT_IMAGE_REG2,
BPS_OUTPUT_IMAGE_FIRST = BPS_OUTPUT_IMAGE_FULL,
BPS_OUTPUT_IMAGE_LAST = BPS_OUTPUT_IMAGE_REG2,
BPS_IO_IMAGES_MAX
};
/*
 * struct frame_buffer - per-image buffer and meta-buffer addresses,
 * one entry per plane.
 */
struct frame_buffer {
uint32_t buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
uint32_t meta_buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
} __packed;
/*
 * struct bps_frame_process_data - payload of a BPS FRAME_PROCESS command:
 * per-port buffers plus addresses/sizes of the auxiliary buffers
 * (UBWC stats, CDM program, IQ settings, strip library output).
 */
struct bps_frame_process_data {
struct frame_buffer buffers[BPS_IO_IMAGES_MAX];
uint32_t max_num_cores;
uint32_t target_time;
uint32_t ubwc_stats_buffer_addr;
uint32_t ubwc_stats_buffer_size;
uint32_t cdm_buffer_addr;
uint32_t cdm_buffer_size;
uint32_t iq_settings_addr;
uint32_t strip_lib_out_addr;
uint32_t cdm_prog_addr;
uint32_t request_id;
};
/* enum hfi_ipe_image_format - image-level pixel formats known to the FW. */
enum hfi_ipe_image_format {
IMAGE_FORMAT_INVALID,
IMAGE_FORMAT_MIPI_8,
IMAGE_FORMAT_MIPI_10,
IMAGE_FORMAT_MIPI_12,
IMAGE_FORMAT_MIPI_14,
IMAGE_FORMAT_BAYER_8,
IMAGE_FORMAT_BAYER_10,
IMAGE_FORMAT_BAYER_12,
IMAGE_FORMAT_BAYER_14,
IMAGE_FORMAT_PDI_10,
IMAGE_FORMAT_PD_10,
IMAGE_FORMAT_PD_8,
IMAGE_FORMAT_INDICATIONS,
IMAGE_FORMAT_REFINEMENT,
IMAGE_FORMAT_UBWC_TP_10,
IMAGE_FORMAT_UBWC_NV_12,
IMAGE_FORMAT_UBWC_NV12_4R,
IMAGE_FORMAT_UBWC_P010,
IMAGE_FORMAT_LINEAR_TP_10,
IMAGE_FORMAT_LINEAR_P010,
IMAGE_FORMAT_LINEAR_NV12,
IMAGE_FORMAT_LINEAR_PLAIN_16,
IMAGE_FORMAT_YUV422_8,
IMAGE_FORMAT_YUV422_10,
IMAGE_FORMAT_STATISTICS_BAYER_GRID,
IMAGE_FORMAT_STATISTICS_BAYER_HISTOGRAM,
IMAGE_FORMAT_MAX
};
/*
 * enum hfi_ipe_plane_format - per-plane pixel formats; planar formats
 * above split into separate _Y/_C (luma/chroma) entries here.
 */
enum hfi_ipe_plane_format {
PLANE_FORMAT_INVALID = 0,
PLANE_FORMAT_MIPI_8,
PLANE_FORMAT_MIPI_10,
PLANE_FORMAT_MIPI_12,
PLANE_FORMAT_MIPI_14,
PLANE_FORMAT_BAYER_8,
PLANE_FORMAT_BAYER_10,
PLANE_FORMAT_BAYER_12,
PLANE_FORMAT_BAYER_14,
PLANE_FORMAT_PDI_10,
PLANE_FORMAT_PD_10,
PLANE_FORMAT_PD_8,
PLANE_FORMAT_INDICATIONS,
PLANE_FORMAT_REFINEMENT,
PLANE_FORMAT_UBWC_TP_10_Y,
PLANE_FORMAT_UBWC_TP_10_C,
PLANE_FORMAT_UBWC_NV_12_Y,
PLANE_FORMAT_UBWC_NV_12_C,
PLANE_FORMAT_UBWC_NV_12_4R_Y,
PLANE_FORMAT_UBWC_NV_12_4R_C,
PLANE_FORMAT_UBWC_P010_Y,
PLANE_FORMAT_UBWC_P010_C,
PLANE_FORMAT_LINEAR_TP_10_Y,
PLANE_FORMAT_LINEAR_TP_10_C,
PLANE_FORMAT_LINEAR_P010_Y,
PLANE_FORMAT_LINEAR_P010_C,
PLANE_FORMAT_LINEAR_NV12_Y,
PLANE_FORMAT_LINEAR_NV12_C,
PLANE_FORMAT_LINEAR_PLAIN_16_Y,
PLANE_FORMAT_LINEAR_PLAIN_16_C,
PLANE_FORMAT_YUV422_8,
PLANE_FORMAT_YUV422_10,
PLANE_FORMAT_STATISTICS_BAYER_GRID,
PLANE_FORMAT_STATISTICS_BAYER_HISTOGRAM,
PLANE_FORMAT_MAX
};
/* enum hfi_ipe_bayer_pixel_order - first pixel of the Bayer mosaic. */
enum hfi_ipe_bayer_pixel_order {
FIRST_PIXEL_R,
FIRST_PIXEL_GR,
FIRST_PIXEL_B,
FIRST_PIXEL_GB,
FIRST_PIXEL_MAX
};
/* enum hfi_ipe_pixel_pack_alignment - bit alignment of packed pixels. */
enum hfi_ipe_pixel_pack_alignment {
PIXEL_LSB_ALIGNED,
PIXEL_MSB_ALIGNED,
};
/* enum hfi_ipe_yuv_422_order - component ordering for YUV422 data. */
enum hfi_ipe_yuv_422_order {
PIXEL_ORDER_Y_U_Y_V,
PIXEL_ORDER_Y_V_Y_U,
PIXEL_ORDER_U_Y_V_Y,
PIXEL_ORDER_V_Y_U_Y,
PIXEL_ORDER_YUV422_MAX
};
/*
 * enum ubwc_write_client - IPE UBWC write-client indices; note clients
 * 2-4 are absent from this list (reason not visible here -- presumably
 * they are not UBWC-capable; confirm against hardware docs).
 */
enum ubwc_write_client {
IPE_WR_CLIENT_0 = 0,
IPE_WR_CLIENT_1,
IPE_WR_CLIENT_5,
IPE_WR_CLIENT_6,
IPE_WR_CLIENT_7,
IPE_WR_CLIENT_8,
IPE_WR_CLIENT_MAX
};
/**
 * struct image_info - pixel-format description of one image port
 * @format: image format
 * @img_width: image width
 * @img_height: image height
 * @bayer_order: pixel order
 * @pix_align: alignment
 * @yuv422_order: YUV order
 * @byte_swap: byte swap
 */
struct image_info {
enum hfi_ipe_image_format format;
uint32_t img_width;
uint32_t img_height;
enum hfi_ipe_bayer_pixel_order bayer_order;
enum hfi_ipe_pixel_pack_alignment pix_align;
enum hfi_ipe_yuv_422_order yuv422_order;
uint32_t byte_swap;
} __packed;
/**
 * struct buffer_layout - stride/height of one plane's buffer
 * @buf_stride: buffer stride
 * @buf_height: buffer height
 */
struct buffer_layout {
uint32_t buf_stride;
uint32_t buf_height;
} __packed;
/**
 * struct image_desc - full description of one image port
 * @info: image info
 * @buf_layout: buffer layout
 * @meta_buf_layout: meta buffer layout
 */
struct image_desc {
struct image_info info;
struct buffer_layout buf_layout[MAX_NUM_OF_IMAGE_PLANES];
struct buffer_layout meta_buf_layout[MAX_NUM_OF_IMAGE_PLANES];
} __packed;
/* One ICA (image correction/alignment) stabilization transform. */
struct ica_stab_coeff {
uint32_t coeffs[8];
} __packed;
/* ICA stabilization parameters: mode plus transform coefficient sets. */
struct ica_stab_params {
uint32_t mode;
struct ica_stab_coeff transforms[3];
} __packed;
/*
 * struct frame_set - one batched frame: per-port buffers, ICA params
 * and the two ICA CDM program addresses.
 */
struct frame_set {
struct frame_buffer buffers[IPE_IO_IMAGES_MAX];
struct ica_stab_params ica_params;
uint32_t cdm_ica1_addr;
uint32_t cdm_ica2_addr;
} __packed;
/*
 * struct ipe_frame_process_data - payload of an IPE FRAME_PROCESS
 * command: auxiliary buffer addresses/sizes, per-stage CDM program
 * addresses, and up to MAX_HFR_GROUP batched frame sets
 * (frames_in_batch gives the valid count).
 */
struct ipe_frame_process_data {
uint32_t strip_lib_out_addr;
uint32_t iq_settings_addr;
uint32_t scratch_buffer_addr;
uint32_t scratch_buffer_size;
uint32_t ubwc_stats_buffer_addr;
uint32_t ubwc_stats_buffer_size;
uint32_t cdm_buffer_addr;
uint32_t cdm_buffer_size;
uint32_t max_num_cores;
uint32_t target_time;
uint32_t cdm_prog_base;
uint32_t cdm_pre_ltm;
uint32_t cdm_post_ltm;
uint32_t cdm_anr_full_pass;
uint32_t cdm_anr_ds4;
uint32_t cdm_anr_ds16;
uint32_t cdm_anr_ds64;
uint32_t cdm_tf_full_pass;
uint32_t cdm_tf_ds4;
uint32_t cdm_tf_ds16;
uint32_t cdm_tf_ds64;
uint32_t request_id;
uint32_t frames_in_batch;
struct frame_set framesets[MAX_HFR_GROUP];
} __packed;
/**
 * struct hfi_cmd_ipe_config - IPE CONFIG_IO command payload
 * @images: images descriptions
 * @user_data: user supplied data
 *
 * payload for IPE async command
 */
struct hfi_cmd_ipe_config {
struct image_desc images[IPE_IO_IMAGES_MAX];
uint64_t user_data;
} __packed;
/**
 * struct frame_buffers - buffer/meta-buffer pointers for one image
 * @buf_ptr: buffer pointers for all planes
 * @meta_buf_ptr: meta buffer pointers for all planes
 */
struct frame_buffers {
uint32_t buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
uint32_t meta_buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
} __packed;
/**
 * struct hfi_msg_ipe_config - response to IPE CONFIG_IO
 * @rc: result of ipe config command
 * @scratch_mem_size: scratch mem size for a config
 * @user_data: user data
 */
struct hfi_msg_ipe_config {
uint32_t rc;
uint32_t scratch_mem_size;
uint64_t user_data;
} __packed;
/**
 * struct hfi_msg_bps_common - generic BPS response
 * @rc: result of ipe config command
 * @user_data: user data
 */
struct hfi_msg_bps_common {
uint32_t rc;
uint64_t user_data;
} __packed;
/**
 * struct ipe_bps_destroy - destroy response payload
 * @userdata: user data
 * (field name is "userdata", unlike the "user_data" spelling used by
 * the sibling structs; part of the FW ABI, so left as is)
 */
struct ipe_bps_destroy {
uint64_t userdata;
};
/**
 * struct hfi_msg_ipe_frame_process - IPE frame-process response
 * @status: result of ipe frame process command
 * @scratch_buf_addr: address of scratch buffer
 * @user_data: user data
 */
struct hfi_msg_ipe_frame_process {
uint32_t status;
uint32_t scratch_buf_addr;
uint64_t user_data;
} __packed;
#endif /* _CAM_HFI_SESSION_DEFS_H */

View file

@ -0,0 +1,528 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _HFI_DEFS_H_
#define _HFI_DEFS_H_
#include <linux/types.h>
/*
* Following base acts as common starting points
* for all enumerations.
*/
#define HFI_COMMON_BASE 0x0
/* HFI Domain base offset for commands and messages */
#define HFI_DOMAIN_SHFT (24)
#define HFI_DOMAIN_BMSK (0x7 << HFI_DOMAIN_SHFT)
#define HFI_DOMAIN_BASE_ICP (0x0 << HFI_DOMAIN_SHFT)
#define HFI_DOMAIN_BASE_IPE_BPS (0x1 << HFI_DOMAIN_SHFT)
#define HFI_DOMAIN_BASE_CDM (0x2 << HFI_DOMAIN_SHFT)
#define HFI_DOMAIN_BASE_DBG (0x3 << HFI_DOMAIN_SHFT)
/* Command base offset for commands */
#define HFI_CMD_START_OFFSET 0x10000
/* Command base offset for messages */
#define HFI_MSG_START_OFFSET 0x20000
/* System Level Error types */
#define HFI_ERR_SYS_NONE (HFI_COMMON_BASE)
#define HFI_ERR_SYS_FATAL (HFI_COMMON_BASE + 0x1)
#define HFI_ERR_SYS_VERSION_MISMATCH (HFI_COMMON_BASE + 0x2)
#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN (HFI_COMMON_BASE + 0x3)
#define HFI_ERR_SYS_UNSUPPORT_CMD (HFI_COMMON_BASE + 0x4)
#define HFI_ERR_SYS_CMDFAILED (HFI_COMMON_BASE + 0x5)
#define HFI_ERR_SYS_CMDSIZE (HFI_COMMON_BASE + 0x6)
/* System Level Event types */
#define HFI_EVENT_SYS_ERROR (HFI_COMMON_BASE + 0x1)
#define HFI_EVENT_ICP_ERROR (HFI_COMMON_BASE + 0x2)
#define HFI_EVENT_IPE_BPS_ERROR (HFI_COMMON_BASE + 0x3)
#define HFI_EVENT_CDM_ERROR (HFI_COMMON_BASE + 0x4)
#define HFI_EVENT_DBG_ERROR (HFI_COMMON_BASE + 0x5)
/* Core level start Ranges for errors */
#define HFI_ERR_ICP_START (HFI_COMMON_BASE + 0x64)
#define HFI_ERR_IPE_BPS_START (HFI_ERR_ICP_START + 0x64)
#define HFI_ERR_CDM_START (HFI_ERR_IPE_BPS_START + 0x64)
#define HFI_ERR_DBG_START (HFI_ERR_CDM_START + 0x64)
/*ICP Core level error messages */
#define HFI_ERR_NO_RES (HFI_ERR_ICP_START + 0x1)
#define HFI_ERR_UNSUPPORTED_RES (HFI_ERR_ICP_START + 0x2)
#define HFI_ERR_UNSUPPORTED_PROP (HFI_ERR_ICP_START + 0x3)
#define HFI_ERR_INIT_EXPECTED (HFI_ERR_ICP_START + 0x4)
#define HFI_ERR_INIT_IGNORED (HFI_ERR_ICP_START + 0x5)
/* System level commands */
#define HFI_CMD_COMMON_START \
(HFI_DOMAIN_BASE_ICP + HFI_CMD_START_OFFSET + 0x0)
#define HFI_CMD_SYS_INIT (HFI_CMD_COMMON_START + 0x1)
#define HFI_CMD_SYS_PC_PREP (HFI_CMD_COMMON_START + 0x2)
#define HFI_CMD_SYS_SET_PROPERTY (HFI_CMD_COMMON_START + 0x3)
#define HFI_CMD_SYS_GET_PROPERTY (HFI_CMD_COMMON_START + 0x4)
#define HFI_CMD_SYS_PING (HFI_CMD_COMMON_START + 0x5)
#define HFI_CMD_SYS_RESET (HFI_CMD_COMMON_START + 0x6)
/* Core level commands */
/* IPE/BPS core Commands */
#define HFI_CMD_IPE_BPS_COMMON_START \
(HFI_DOMAIN_BASE_IPE_BPS + HFI_CMD_START_OFFSET + 0x0)
#define HFI_CMD_IPEBPS_CREATE_HANDLE \
(HFI_CMD_IPE_BPS_COMMON_START + 0x8)
#define HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT \
(HFI_CMD_IPE_BPS_COMMON_START + 0xa)
#define HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT \
(HFI_CMD_IPE_BPS_COMMON_START + 0xe)
/* CDM core Commands */
#define HFI_CMD_CDM_COMMON_START \
(HFI_DOMAIN_BASE_CDM + HFI_CMD_START_OFFSET + 0x0)
#define HFI_CMD_CDM_TEST_START (HFI_CMD_CDM_COMMON_START + 0x800)
#define HFI_CMD_CDM_END (HFI_CMD_CDM_COMMON_START + 0xFFF)
/* Debug/Test Commands */
#define HFI_CMD_DBG_COMMON_START \
(HFI_DOMAIN_BASE_DBG + HFI_CMD_START_OFFSET + 0x0)
#define HFI_CMD_DBG_TEST_START (HFI_CMD_DBG_COMMON_START + 0x800)
#define HFI_CMD_DBG_END (HFI_CMD_DBG_COMMON_START + 0xFFF)
/* System level messages */
#define HFI_MSG_ICP_COMMON_START \
(HFI_DOMAIN_BASE_ICP + HFI_MSG_START_OFFSET + 0x0)
#define HFI_MSG_SYS_INIT_DONE (HFI_MSG_ICP_COMMON_START + 0x1)
#define HFI_MSG_SYS_PC_PREP_DONE (HFI_MSG_ICP_COMMON_START + 0x2)
#define HFI_MSG_SYS_DEBUG (HFI_MSG_ICP_COMMON_START + 0x3)
#define HFI_MSG_SYS_IDLE (HFI_MSG_ICP_COMMON_START + 0x4)
#define HFI_MSG_SYS_PROPERTY_INFO (HFI_MSG_ICP_COMMON_START + 0x5)
#define HFI_MSG_SYS_PING_ACK (HFI_MSG_ICP_COMMON_START + 0x6)
#define HFI_MSG_SYS_RESET_ACK (HFI_MSG_ICP_COMMON_START + 0x7)
#define HFI_MSG_EVENT_NOTIFY (HFI_MSG_ICP_COMMON_START + 0x8)
/* Core level Messages */
/* IPE/BPS core Messages */
#define HFI_MSG_IPE_BPS_COMMON_START \
(HFI_DOMAIN_BASE_IPE_BPS + HFI_MSG_START_OFFSET + 0x0)
#define HFI_MSG_IPEBPS_CREATE_HANDLE_ACK \
(HFI_MSG_IPE_BPS_COMMON_START + 0x08)
#define HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK \
(HFI_MSG_IPE_BPS_COMMON_START + 0x0a)
#define HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK \
(HFI_MSG_IPE_BPS_COMMON_START + 0x0e)
#define HFI_MSG_IPE_BPS_TEST_START \
(HFI_MSG_IPE_BPS_COMMON_START + 0x800)
#define HFI_MSG_IPE_BPS_END \
(HFI_MSG_IPE_BPS_COMMON_START + 0xFFF)
/* CDM core Messages */
#define HFI_MSG_CDM_COMMON_START \
(HFI_DOMAIN_BASE_CDM + HFI_MSG_START_OFFSET + 0x0)
#define HFI_MSG_PRI_CDM_PAYLOAD_ACK (HFI_MSG_CDM_COMMON_START + 0xa)
#define HFI_MSG_PRI_LLD_PAYLOAD_ACK (HFI_MSG_CDM_COMMON_START + 0xb)
#define HFI_MSG_CDM_TEST_START (HFI_MSG_CDM_COMMON_START + 0x800)
#define HFI_MSG_CDM_END (HFI_MSG_CDM_COMMON_START + 0xFFF)
/* core level test command ranges */
/* ICP core level test command range */
#define HFI_CMD_ICP_TEST_START (HFI_CMD_ICP_COMMON_START + 0x800)
#define HFI_CMD_ICP_END (HFI_CMD_ICP_COMMON_START + 0xFFF)
/* IPE/BPS core level test command range */
#define HFI_CMD_IPE_BPS_TEST_START \
(HFI_CMD_IPE_BPS_COMMON_START + 0x800)
#define HFI_CMD_IPE_BPS_END (HFI_CMD_IPE_BPS_COMMON_START + 0xFFF)
/* ICP core level test message range */
#define HFI_MSG_ICP_TEST_START (HFI_MSG_ICP_COMMON_START + 0x800)
#define HFI_MSG_ICP_END (HFI_MSG_ICP_COMMON_START + 0xFFF)
/* ICP core level Debug test message range */
#define HFI_MSG_DBG_COMMON_START \
(HFI_DOMAIN_BASE_DBG + 0x0)
#define HFI_MSG_DBG_TEST_START (HFI_MSG_DBG_COMMON_START + 0x800)
#define HFI_MSG_DBG_END (HFI_MSG_DBG_COMMON_START + 0xFFF)
/* System level property base offset */
#define HFI_PROPERTY_ICP_COMMON_START (HFI_DOMAIN_BASE_ICP + 0x0)
#define HFI_PROP_SYS_DEBUG_CFG (HFI_PROPERTY_ICP_COMMON_START + 0x1)
#define HFI_PROP_SYS_UBWC_CFG (HFI_PROPERTY_ICP_COMMON_START + 0x2)
#define HFI_PROP_SYS_IMAGE_VER (HFI_PROPERTY_ICP_COMMON_START + 0x3)
#define HFI_PROP_SYS_SUPPORTED (HFI_PROPERTY_ICP_COMMON_START + 0x4)
#define HFI_PROP_SYS_IPEBPS_PC (HFI_PROPERTY_ICP_COMMON_START + 0x5)
#define HFI_PROP_SYS_FW_DUMP_CFG (HFI_PROPERTY_ICP_COMMON_START + 0x8)
/* Capabilities reported at sys init */
#define HFI_CAPS_PLACEHOLDER_1 (HFI_COMMON_BASE + 0x1)
#define HFI_CAPS_PLACEHOLDER_2 (HFI_COMMON_BASE + 0x2)
/* Section describes different debug levels (HFI_DEBUG_MSG_X)
* available for debug messages from FW
*/
#define HFI_DEBUG_MSG_LOW 0x00000001
#define HFI_DEBUG_MSG_MEDIUM 0x00000002
#define HFI_DEBUG_MSG_HIGH 0x00000004
#define HFI_DEBUG_MSG_ERROR 0x00000008
#define HFI_DEBUG_MSG_FATAL 0x00000010
/* Messages containing performance data */
#define HFI_DEBUG_MSG_PERF 0x00000020
/* Disable ARM9 WFI in low power mode. */
#define HFI_DEBUG_CFG_WFI 0x01000000
/* Disable ARM9 watchdog. */
#define HFI_DEBUG_CFG_ARM9WD 0x10000000
/*
* HFI_FW_DUMP levels
* HFI_FW_DUMP_xx
*/
#define HFI_FW_DUMP_DISABLED 0x00000000
#define HFI_FW_DUMP_ON_FAILURE 0x00000001
#define HFI_FW_DUMP_ALWAYS 0x00000002
/* Number of available dump levels. */
#define NUM_HFI_DUMP_LVL 0x00000003
/* Debug Msg Communication types:
 * Section describes different modes (HFI_DEBUG_MODE_X)
 * available to communicate the debug messages
 */
/* Debug message output through the interface debug queue. */
#define HFI_DEBUG_MODE_QUEUE 0x00000001
/* Debug message output through QDSS. */
#define HFI_DEBUG_MODE_QDSS 0x00000002
/* Number of debug modes available. */
#define NUM_HFI_DEBUG_MODE 0x00000002
/*
 * The HFI_DEBUG_MSG_* / HFI_DEBUG_CFG_* flags were duplicated here;
 * they are already defined (with documentation) earlier in this file,
 * so the redundant copies have been removed.
 */
/* Max entries in the dev_hw_ver[] array of hfi_msg_init_done_data. */
#define HFI_DEV_VERSION_MAX 0x5
/**
 * start of sys command packet types
 * These commands are used to get system level information
 * from firmware
 */
/**
 * struct hfi_caps_support - one capability entry
 * payload to report caps through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED
 * @type: capability type
 * @min: minimum supported value for the capability
 * @max: maximum supported value for the capability
 * @step_size: supported steps between min-max
 */
struct hfi_caps_support {
uint32_t type;
uint32_t min;
uint32_t max;
uint32_t step_size;
} __packed;
/**
 * struct hfi_caps_support_info - list of capability entries
 * capability report through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED
 * @num_caps: number of capabilities listed
 * @caps_data: capabilities info array
 * (sized [1]: trailing entries follow, length is num_caps)
 */
struct hfi_caps_support_info {
uint32_t num_caps;
struct hfi_caps_support caps_data[1];
} __packed;
/**
 * struct hfi_debug - debug configuration property
 * payload structure to configure HFI_PROPERTY_SYS_DEBUG_CONFIG
 * @debug_config: it is a result of HFI_DEBUG_MSG_X values that
 * are OR-ed together to specify the debug message types
 * to output
 * @debug_mode: debug message output through debug queue/qdss
 * @HFI_PROPERTY_SYS_DEBUG_CONFIG
 */
struct hfi_debug {
uint32_t debug_config;
uint32_t debug_mode;
} __packed;
/**
 * struct hfi_ipe_bps_pc - power-collapse configuration property
 * payload structure to configure HFI_PROPERTY_SYS_IPEBPS_PC
 * @enable: Flag to enable IPE, BPS interframe power collapse
 * @core_info: Core information to firmware
 */
struct hfi_ipe_bps_pc {
uint32_t enable;
uint32_t core_info;
} __packed;
/**
 * struct hfi_cmd_ubwc_cfg - UBWC configuration property
 * Payload structure to configure HFI_PROP_SYS_UBWC_CFG
 * @ubwc_fetch_cfg: UBWC configuration for fetch
 * @ubwc_write_cfg: UBWC configuration for write
 */
struct hfi_cmd_ubwc_cfg {
uint32_t ubwc_fetch_cfg;
uint32_t ubwc_write_cfg;
};
/**
 * struct hfi_cmd_sys_init - system session init command
 * command to initialization of system session
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @HFI_CMD_SYS_INIT
 */
struct hfi_cmd_sys_init {
uint32_t size;
uint32_t pkt_type;
} __packed;
/**
 * struct hfi_cmd_pc_prep - power collapse prepare command
 * command to firmware to prepare for power collapse
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @HFI_CMD_SYS_PC_PREP
 */
struct hfi_cmd_pc_prep {
uint32_t size;
uint32_t pkt_type;
} __packed;
/**
 * struct hfi_cmd_prop - get/set property command
 * command to get/set properties of firmware
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @num_prop: number of properties queried/set
 * @prop_data: array of property IDs being queried. size depends on num_prop
 * array of property IDs and associated structure pairs in set
 * @HFI_CMD_SYS_GET_PROPERTY
 * @HFI_CMD_SYS_SET_PROPERTY
 */
struct hfi_cmd_prop {
uint32_t size;
uint32_t pkt_type;
uint32_t num_prop;
uint32_t prop_data[1];
} __packed;
/**
 * struct hfi_cmd_ping_pkt - firmware liveness ping
 * ping command pings the firmware to confirm whether
 * it is alive.
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @user_data: client data, firmware returns this data
 * as part of HFI_MSG_SYS_PING_ACK
 * @HFI_CMD_SYS_PING
 */
struct hfi_cmd_ping_pkt {
uint32_t size;
uint32_t pkt_type;
uint64_t user_data;
} __packed;
/**
 * struct hfi_cmd_sys_reset_pkt - system reset command
 * sends the reset command to FW. FW responds in the same type
 * of packet. so can be used for reset_ack_pkt type also
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @user_data: client data, firmware returns this data
 * as part of HFI_MSG_SYS_RESET_ACK
 * @HFI_CMD_SYS_RESET
 */
struct hfi_cmd_sys_reset_pkt {
uint32_t size;
uint32_t pkt_type;
uint64_t user_data;
} __packed;
/* end of sys command packet types */
/* start of sys message packet types */
/**
 * struct hfi_sys_support - maximum supported features report
 * structure to report maximum supported features of firmware.
 * @place_holder: reserved; no fields defined yet
 */
struct hfi_sys_support {
uint32_t place_holder;
} __packed;
/**
 * struct hfi_supported_prop - supported property list
 * structure to report HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED
 * for a session
 * @num_prop: number of properties supported
 * @prop_data: array of supported property IDs
 * (sized [1]: trailing entries follow, length is num_prop)
 */
struct hfi_supported_prop {
uint32_t num_prop;
uint32_t prop_data[1];
} __packed;
/**
 * struct hfi_image_version - firmware image version report
 * system image version
 * @major: major version number
 * @minor: minor version number
 * @ver_name_size: size of version name
 * @ver_name: image version name
 * (sized [1]: trailing bytes follow, length is ver_name_size)
 */
struct hfi_image_version {
uint32_t major;
uint32_t minor;
uint32_t ver_name_size;
uint8_t ver_name[1];
} __packed;
/**
 * struct hfi_msg_init_done_data - payload carried by init-done message
 * @api_ver: Firmware API version
 * @dev_ver: Device version
 * @num_icp_hw: Number of ICP hardware information
 * @dev_hw_ver: Supported hardware version information
 * @reserved: Reserved field
 */
struct hfi_msg_init_done_data {
uint32_t api_ver;
uint32_t dev_ver;
uint32_t num_icp_hw;
uint32_t dev_hw_ver[HFI_DEV_VERSION_MAX];
uint32_t reserved;
};
/**
 * struct hfi_msg_init_done - system init done message
 * system init done message from firmware. Many system level properties
 * are returned with the packet
 * @size: Packet size in bytes
 * @pkt_type: Opcode of a packet
 * @err_type: Error code associated with response
 * @num_prop: Number of default capability info
 * @prop_data: Array of property ids and corresponding structure pairs
 * (sized [1]: trailing data follows, length depends on num_prop)
 */
struct hfi_msg_init_done {
uint32_t size;
uint32_t pkt_type;
uint32_t err_type;
uint32_t num_prop;
uint32_t prop_data[1];
} __packed;
/**
 * struct hfi_msg_pc_prep_done - power collapse preparation ack
 * system power collapse preparation done message
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @err_type: error code associated with the response
 */
struct hfi_msg_pc_prep_done {
uint32_t size;
uint32_t pkt_type;
uint32_t err_type;
} __packed;
/**
 * struct hfi_msg_prop - property info message
 * system property info from firmware
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @num_prop: number of property info structures
 * @prop_data: array of property IDs and associated structure pairs
 * (sized [1]: trailing data follows, length depends on num_prop)
 */
struct hfi_msg_prop {
uint32_t size;
uint32_t pkt_type;
uint32_t num_prop;
uint32_t prop_data[1];
} __packed;
/**
 * struct hfi_msg_idle - idle notification
 * system idle message from firmware
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 */
struct hfi_msg_idle {
uint32_t size;
uint32_t pkt_type;
} __packed;
/**
 * struct hfi_msg_ping_ack - ping acknowledgement
 * system ping ack message
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @user_data: this data is sent as part of ping command from host
 */
struct hfi_msg_ping_ack {
uint32_t size;
uint32_t pkt_type;
uint64_t user_data;
} __packed;
/**
 * struct hfi_msg_debug - firmware debug message
 * system debug message definition
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @msg_type: debug message type
 * @msg_size: size of debug message in bytes
 * @timestamp_hi: most significant 32 bits of the 64 bit timestamp field.
 * timestamp shall be interpreted as a signed 64-bit value
 * representing microseconds.
 * @timestamp_lo: least significant 32 bits of the 64 bit timestamp field.
 * timestamp shall be interpreted as a signed 64-bit value
 * representing microseconds.
 * @msg_data: message data in string form
 * (sized [1]: trailing bytes follow, length is msg_size)
 */
struct hfi_msg_debug {
uint32_t size;
uint32_t pkt_type;
uint32_t msg_type;
uint32_t msg_size;
uint32_t timestamp_hi;
uint32_t timestamp_lo;
uint8_t msg_data[1];
} __packed;
/**
 * struct hfi_msg_event_notify - asynchronous event notification
 * event notify message
 * @size: packet size in bytes
 * @pkt_type: opcode of a packet
 * @fw_handle: firmware session handle
 * @event_id: session event id
 * @event_data1: event data corresponding to event ID
 * @event_data2: event data corresponding to event ID
 * @ext_event_data: info array, interpreted based on event_data1
 * and event_data2
 */
struct hfi_msg_event_notify {
uint32_t size;
uint32_t pkt_type;
uint32_t fw_handle;
uint32_t event_id;
uint32_t event_data1;
uint32_t event_data2;
uint32_t ext_event_data[1];
} __packed;
/**
 * end of sys message packet types
 */
#endif /* _HFI_DEFS_H_ */

View file

@ -0,0 +1,890 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <asm/errno.h>
#include <linux/timer.h>
#include <media/cam_icp.h>
#include <linux/iopoll.h>
#include <soc/qcom/socinfo.h>
#include "cam_io_util.h"
#include "hfi_reg.h"
#include "hfi_sys_defs.h"
#include "hfi_session_defs.h"
#include "hfi_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_debug_util.h"
#include "cam_soc_util.h"
/* Host-side HFI interface version (major.minor.step) reported to FW. */
#define HFI_VERSION_INFO_MAJOR_VAL 1
#define HFI_VERSION_INFO_MINOR_VAL 1
/* NOTE(review): this macro was defined twice on consecutive lines;
 * the redundant duplicate has been dropped.
 */
#define HFI_VERSION_INFO_STEP_VAL 0
/* Bit-field layout of the packed version word. */
#define HFI_VERSION_INFO_MAJOR_BMSK 0xFF000000
#define HFI_VERSION_INFO_MAJOR_SHFT 24
#define HFI_VERSION_INFO_MINOR_BMSK 0xFFFF00
#define HFI_VERSION_INFO_MINOR_SHFT 8
#define HFI_VERSION_INFO_STEP_BMSK 0xFF
#define HFI_VERSION_INFO_STEP_SHFT 0
/* Max register-poll retries before giving up. */
#define HFI_MAX_POLL_TRY 5
/* Singleton HFI interface state; NULL until hfi init completes. */
static struct hfi_info *g_hfi;
unsigned int g_icp_mmu_hdl;
/* Serialize host access to the command and message queues. */
static DEFINE_MUTEX(hfi_cmd_q_mutex);
static DEFINE_MUTEX(hfi_msg_q_mutex);
/*
 * cam_hfi_queue_dump() - log the queue table header plus the full
 * contents of the command and message queues, for debugging.
 *
 * Safe to call at any time; bails out if the HFI interface has not
 * been initialized.
 */
void cam_hfi_queue_dump(void)
{
	struct hfi_qtbl *qtbl;
	struct hfi_qtbl_hdr *qtbl_hdr;
	struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr;
	struct hfi_mem_info *hfi_mem;
	uint32_t *read_q, *read_ptr;
	int i;

	/*
	 * Check the global itself: the previous code computed
	 * &g_hfi->map first and tested that pointer, but the address of
	 * a member is never NULL even when g_hfi is NULL, so the guard
	 * could not fire and a NULL g_hfi was dereferenced below.
	 */
	if (!g_hfi) {
		CAM_ERR(CAM_HFI, "Unable to dump queues hfi memory is NULL");
		return;
	}
	hfi_mem = &g_hfi->map;

	qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
	qtbl_hdr = &qtbl->q_tbl_hdr;
	CAM_DBG(CAM_HFI,
		"qtbl: version = %x size = %u num q = %u qhdr_size = %u",
		qtbl_hdr->qtbl_version, qtbl_hdr->qtbl_size,
		qtbl_hdr->qtbl_num_q, qtbl_hdr->qtbl_qhdr_size);

	/* Dump command queue header and every word of its ring. */
	cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
	CAM_DBG(CAM_HFI, "cmd: size = %u r_idx = %u w_idx = %u addr = %x",
		cmd_q_hdr->qhdr_q_size, cmd_q_hdr->qhdr_read_idx,
		cmd_q_hdr->qhdr_write_idx, hfi_mem->cmd_q.iova);
	read_q = (uint32_t *)g_hfi->map.cmd_q.kva;
	read_ptr = (uint32_t *)(read_q + 0);
	CAM_DBG(CAM_HFI, "CMD Q START");
	for (i = 0; i < ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; i++)
		CAM_DBG(CAM_HFI, "Word: %d Data: 0x%08x ", i, read_ptr[i]);

	/* Dump message queue header and every word of its ring. */
	msg_q_hdr = &qtbl->q_hdr[Q_MSG];
	CAM_DBG(CAM_HFI, "msg: size = %u r_idx = %u w_idx = %u addr = %x",
		msg_q_hdr->qhdr_q_size, msg_q_hdr->qhdr_read_idx,
		msg_q_hdr->qhdr_write_idx, hfi_mem->msg_q.iova);
	read_q = (uint32_t *)g_hfi->map.msg_q.kva;
	read_ptr = (uint32_t *)(read_q + 0);
	CAM_DBG(CAM_HFI, "MSG Q START");
	for (i = 0; i < ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; i++)
		CAM_DBG(CAM_HFI, "Word: %d Data: 0x%08x ", i, read_ptr[i]);
}
/*
 * hfi_write_cmd() - enqueue one HFI packet on the host-to-firmware
 * command ring and raise the HOST2ICP doorbell interrupt.
 * @cmd_ptr: packet to send; its first 32-bit word must hold the packet
 *           size in bytes.
 *
 * Return: 0 on success, -EINVAL for a zero-sized packet, -ENODEV when
 * the HFI interface is not up, -EIO when the ring lacks space.
 */
int hfi_write_cmd(void *cmd_ptr)
{
	uint32_t size_in_words, empty_space, new_write_idx, read_idx, temp;
	uint32_t *write_q, *write_ptr;
	struct hfi_qtbl *q_tbl;
	struct hfi_q_hdr *q;
	int rc = 0;

	if (!cmd_ptr) {
		CAM_ERR(CAM_HFI, "command is null");
		return -EINVAL;
	}

	mutex_lock(&hfi_cmd_q_mutex);
	if (!g_hfi) {
		CAM_ERR(CAM_HFI, "HFI interface not setup");
		rc = -ENODEV;
		goto err;
	}

	if (g_hfi->hfi_state != HFI_READY ||
		!g_hfi->cmd_q_state) {
		CAM_ERR(CAM_HFI, "HFI state: %u, cmd q state: %u",
			g_hfi->hfi_state, g_hfi->cmd_q_state);
		rc = -ENODEV;
		goto err;
	}

	q_tbl = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
	q = &q_tbl->q_hdr[Q_CMD];

	write_q = (uint32_t *)g_hfi->map.cmd_q.kva;

	/* Packet length in 32-bit words, taken from its size header */
	size_in_words = (*(uint32_t *)cmd_ptr) >> BYTE_WORD_SHIFT;
	if (!size_in_words) {
		CAM_DBG(CAM_HFI, "failed");
		rc = -EINVAL;
		goto err;
	}

	/* Free space on the ring, accounting for read/write wrap-around */
	read_idx = q->qhdr_read_idx;
	empty_space = (q->qhdr_write_idx >= read_idx) ?
		(q->qhdr_q_size - (q->qhdr_write_idx - read_idx)) :
		(read_idx - q->qhdr_write_idx);
	if (empty_space <= size_in_words) {
		CAM_ERR(CAM_HFI, "failed: empty space %u, size_in_words %u",
			empty_space, size_in_words);
		rc = -EIO;
		goto err;
	}

	new_write_idx = q->qhdr_write_idx + size_in_words;
	write_ptr = (uint32_t *)(write_q + q->qhdr_write_idx);

	if (new_write_idx < q->qhdr_q_size) {
		memcpy(write_ptr, (uint8_t *)cmd_ptr,
			size_in_words << BYTE_WORD_SHIFT);
	} else {
		/* Packet wraps past the ring end: split the copy */
		new_write_idx -= q->qhdr_q_size;
		temp = (size_in_words - new_write_idx) << BYTE_WORD_SHIFT;
		memcpy(write_ptr, (uint8_t *)cmd_ptr, temp);
		memcpy(write_q, (uint8_t *)cmd_ptr + temp,
			new_write_idx << BYTE_WORD_SHIFT);
	}

	/*
	 * To make sure command data lands in the command queue before
	 * updating the write index
	 */
	wmb();
	q->qhdr_write_idx = new_write_idx;

	/*
	 * Before raising the interrupt make sure command data is ready
	 * for firmware to process
	 */
	wmb();
	cam_io_w_mb((uint32_t)INTR_ENABLE,
		g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
err:
	mutex_unlock(&hfi_cmd_q_mutex);
	return rc;
}
/*
 * hfi_read_message() - dequeue one packet from the firmware message
 * (Q_MSG) or debug (Q_DBG) ring.
 * @pmsg:       destination buffer for the packet.
 * @q_id:       Q_MSG or Q_DBG.
 * @words_read: out parameter, number of 32-bit words copied to @pmsg.
 *
 * Return: 0 on success, -EINVAL for bad args, -ENODEV when the
 * interface is down, -EIO when the queue is empty or the computed
 * packet size is implausible (the queue is then drained by snapping
 * the read index to the write index).
 */
int hfi_read_message(uint32_t *pmsg, uint8_t q_id,
	uint32_t *words_read)
{
	struct hfi_qtbl *q_tbl_ptr;
	struct hfi_q_hdr *q;
	uint32_t new_read_idx, size_in_words, word_diff, temp;
	uint32_t *read_q, *read_ptr, *write_ptr;
	uint32_t size_upper_bound = 0;
	int rc = 0;

	if (!pmsg) {
		CAM_ERR(CAM_HFI, "Invalid msg");
		return -EINVAL;
	}

	if (q_id > Q_DBG) {
		CAM_ERR(CAM_HFI, "Invalid q :%u", q_id);
		return -EINVAL;
	}

	mutex_lock(&hfi_msg_q_mutex);
	if (!g_hfi) {
		CAM_ERR(CAM_HFI, "hfi not set up yet");
		rc = -ENODEV;
		goto err;
	}

	if ((g_hfi->hfi_state != HFI_READY) ||
		!g_hfi->msg_q_state) {
		CAM_ERR(CAM_HFI, "hfi state: %u, msg q state: %u",
			g_hfi->hfi_state, g_hfi->msg_q_state);
		rc = -ENODEV;
		goto err;
	}

	q_tbl_ptr = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
	q = &q_tbl_ptr->q_hdr[q_id];

	/* read == write means the ring is empty */
	if (q->qhdr_read_idx == q->qhdr_write_idx) {
		CAM_DBG(CAM_HFI, "Q not ready, state:%u, r idx:%u, w idx:%u",
			g_hfi->hfi_state, q->qhdr_read_idx, q->qhdr_write_idx);
		rc = -EIO;
		goto err;
	}

	if (q_id == Q_MSG) {
		read_q = (uint32_t *)g_hfi->map.msg_q.kva;
		size_upper_bound = ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS;
	} else {
		read_q = (uint32_t *)g_hfi->map.dbg_q.kva;
		size_upper_bound = ICP_HFI_MAX_PKT_SIZE_IN_WORDS;
	}

	read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
	write_ptr = (uint32_t *)(read_q + q->qhdr_write_idx);

	/* Words available on the ring, accounting for wrap-around */
	if (write_ptr > read_ptr)
		size_in_words = write_ptr - read_ptr;
	else {
		word_diff = read_ptr - write_ptr;
		if (q_id == Q_MSG)
			size_in_words = (ICP_MSG_Q_SIZE_IN_BYTES >>
			BYTE_WORD_SHIFT) - word_diff;
		else
			size_in_words = (ICP_DBG_Q_SIZE_IN_BYTES >>
			BYTE_WORD_SHIFT) - word_diff;
	}

	if ((size_in_words == 0) ||
		(size_in_words > size_upper_bound)) {
		CAM_ERR(CAM_HFI, "invalid HFI message packet size - 0x%08x",
			size_in_words << BYTE_WORD_SHIFT);
		/* Drop everything pending so the queue can recover */
		q->qhdr_read_idx = q->qhdr_write_idx;
		rc = -EIO;
		goto err;
	}

	new_read_idx = q->qhdr_read_idx + size_in_words;

	if (new_read_idx < q->qhdr_q_size) {
		memcpy(pmsg, read_ptr, size_in_words << BYTE_WORD_SHIFT);
	} else {
		/* Packet wraps past the ring end: split the copy */
		new_read_idx -= q->qhdr_q_size;
		temp = (size_in_words - new_read_idx) << BYTE_WORD_SHIFT;
		memcpy(pmsg, read_ptr, temp);
		memcpy((uint8_t *)pmsg + temp, read_q,
			new_read_idx << BYTE_WORD_SHIFT);
	}

	q->qhdr_read_idx = new_read_idx;
	*words_read = size_in_words;
	/* Memory Barrier to make sure message
	 * queue parameters are updated after read
	 */
	wmb();
err:
	mutex_unlock(&hfi_msg_q_mutex);
	return rc;
}
/*
 * hfi_cmd_ubwc_config() - send the system UBWC configuration property
 * to firmware.
 * @ubwc_cfg: two-word UBWC configuration array.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error
 * from hfi_write_cmd(). (Fix: the original ignored the write result
 * and always reported success.)
 */
int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg)
{
	uint8_t *prop;
	struct hfi_cmd_prop *dbg_prop;
	uint32_t size = 0;
	int rc;

	size = sizeof(struct hfi_cmd_prop) +
		sizeof(struct hfi_cmd_ubwc_cfg);

	prop = kzalloc(size, GFP_KERNEL);
	if (!prop)
		return -ENOMEM;

	dbg_prop = (struct hfi_cmd_prop *)prop;
	dbg_prop->size = size;
	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
	dbg_prop->num_prop = 1;
	dbg_prop->prop_data[0] = HFI_PROP_SYS_UBWC_CFG;
	dbg_prop->prop_data[1] = ubwc_cfg[0];
	dbg_prop->prop_data[2] = ubwc_cfg[1];

	rc = hfi_write_cmd(prop);
	kfree(prop);

	return rc;
}
/*
 * hfi_enable_ipe_bps_pc() - toggle IPE/BPS power-collapse support in
 * firmware via the IPEBPS_PC system property.
 * @enable:    true to enable power collapse.
 * @core_info: bitmask describing the available IPE/BPS cores.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error
 * from hfi_write_cmd(). (Fix: the original ignored the write result
 * and always reported success.)
 */
int hfi_enable_ipe_bps_pc(bool enable, uint32_t core_info)
{
	uint8_t *prop;
	struct hfi_cmd_prop *dbg_prop;
	uint32_t size = 0;
	int rc;

	size = sizeof(struct hfi_cmd_prop) +
		sizeof(struct hfi_ipe_bps_pc);

	prop = kzalloc(size, GFP_KERNEL);
	if (!prop)
		return -ENOMEM;

	dbg_prop = (struct hfi_cmd_prop *)prop;
	dbg_prop->size = size;
	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
	dbg_prop->num_prop = 1;
	dbg_prop->prop_data[0] = HFI_PROP_SYS_IPEBPS_PC;
	dbg_prop->prop_data[1] = enable;
	dbg_prop->prop_data[2] = core_info;

	rc = hfi_write_cmd(prop);
	kfree(prop);

	return rc;
}
/*
 * hfi_set_debug_level() - program the firmware debug configuration
 * property (message level mask plus debug type).
 * @a5_dbg_type: debug sink/type selector passed through to firmware.
 * @lvl:         bitmask of HFI_DEBUG_* levels; must not exceed the
 *               union of all supported bits.
 *
 * Return: 0 on success, -EINVAL for an out-of-range level, -ENOMEM on
 * allocation failure, or the error from hfi_write_cmd(). (Fix: the
 * original ignored the write result and always reported success.)
 */
int hfi_set_debug_level(u64 a5_dbg_type, uint32_t lvl)
{
	uint8_t *prop;
	struct hfi_cmd_prop *dbg_prop;
	uint32_t size = 0, val;
	int rc;

	/* Union of every debug bit the firmware understands */
	val = HFI_DEBUG_MSG_LOW |
		HFI_DEBUG_MSG_MEDIUM |
		HFI_DEBUG_MSG_HIGH |
		HFI_DEBUG_MSG_ERROR |
		HFI_DEBUG_MSG_FATAL |
		HFI_DEBUG_MSG_PERF |
		HFI_DEBUG_CFG_WFI |
		HFI_DEBUG_CFG_ARM9WD;

	if (lvl > val)
		return -EINVAL;

	size = sizeof(struct hfi_cmd_prop) +
		sizeof(struct hfi_debug);

	prop = kzalloc(size, GFP_KERNEL);
	if (!prop)
		return -ENOMEM;

	dbg_prop = (struct hfi_cmd_prop *)prop;
	dbg_prop->size = size;
	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
	dbg_prop->num_prop = 1;
	dbg_prop->prop_data[0] = HFI_PROP_SYS_DEBUG_CFG;
	dbg_prop->prop_data[1] = lvl;
	dbg_prop->prop_data[2] = a5_dbg_type;

	rc = hfi_write_cmd(prop);
	kfree(prop);

	return rc;
}
/*
 * hfi_set_fw_dump_level() - program the firmware crash-dump level
 * property.
 * @lvl: dump level value passed through to firmware.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error
 * from hfi_write_cmd(). (Fix: the original ignored the write result
 * and always reported success.)
 */
int hfi_set_fw_dump_level(uint32_t lvl)
{
	uint8_t *prop = NULL;
	struct hfi_cmd_prop *fw_dump_level_switch_prop = NULL;
	uint32_t size = 0;
	int rc;

	CAM_DBG(CAM_HFI, "fw dump ENTER");

	size = sizeof(struct hfi_cmd_prop) + sizeof(lvl);
	prop = kzalloc(size, GFP_KERNEL);
	if (!prop)
		return -ENOMEM;

	fw_dump_level_switch_prop = (struct hfi_cmd_prop *)prop;
	fw_dump_level_switch_prop->size = size;
	fw_dump_level_switch_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
	fw_dump_level_switch_prop->num_prop = 1;
	fw_dump_level_switch_prop->prop_data[0] = HFI_PROP_SYS_FW_DUMP_CFG;
	fw_dump_level_switch_prop->prop_data[1] = lvl;

	CAM_DBG(CAM_HFI, "prop->size = %d\n"
			 "prop->pkt_type = %d\n"
			 "prop->num_prop = %d\n"
			 "prop->prop_data[0] = %d\n"
			 "prop->prop_data[1] = %d\n",
			 fw_dump_level_switch_prop->size,
			 fw_dump_level_switch_prop->pkt_type,
			 fw_dump_level_switch_prop->num_prop,
			 fw_dump_level_switch_prop->prop_data[0],
			 fw_dump_level_switch_prop->prop_data[1]);

	rc = hfi_write_cmd(prop);
	kfree(prop);

	return rc;
}
/*
 * hfi_send_system_cmd() - build and enqueue one of the fixed
 * system-level HFI command packets.
 * @type: HFI_CMD_SYS_* / HFI_CMD_IPEBPS_* opcode selecting the packet.
 * @data: opcode-specific payload (user data, property id, handle type).
 * @size: unused by the currently supported opcodes.
 *
 * Fix: only the SYS_INIT packet was zeroed before use; every other
 * on-stack packet was sent with whatever stale stack bytes its
 * unassigned fields/padding held. All packets are now memset first.
 */
void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size)
{
	switch (type) {
	case HFI_CMD_SYS_INIT: {
		struct hfi_cmd_sys_init init;

		memset(&init, 0, sizeof(init));

		init.size = sizeof(struct hfi_cmd_sys_init);
		init.pkt_type = type;
		hfi_write_cmd(&init);
	}
		break;
	case HFI_CMD_SYS_PC_PREP: {
		struct hfi_cmd_pc_prep prep;

		memset(&prep, 0, sizeof(prep));
		prep.size = sizeof(struct hfi_cmd_pc_prep);
		prep.pkt_type = type;
		hfi_write_cmd(&prep);
	}
		break;
	case HFI_CMD_SYS_SET_PROPERTY: {
		struct hfi_cmd_prop prop;

		/* Only the debug-cfg property is supported here */
		if ((uint32_t)data == (uint32_t)HFI_PROP_SYS_DEBUG_CFG) {
			memset(&prop, 0, sizeof(prop));
			prop.size = sizeof(struct hfi_cmd_prop);
			prop.pkt_type = type;
			prop.num_prop = 1;
			prop.prop_data[0] = HFI_PROP_SYS_DEBUG_CFG;
			hfi_write_cmd(&prop);
		}
	}
		break;
	case HFI_CMD_SYS_GET_PROPERTY:
		break;
	case HFI_CMD_SYS_PING: {
		struct hfi_cmd_ping_pkt ping;

		memset(&ping, 0, sizeof(ping));
		ping.size = sizeof(struct hfi_cmd_ping_pkt);
		ping.pkt_type = type;
		ping.user_data = (uint64_t)data;
		hfi_write_cmd(&ping);
	}
		break;
	case HFI_CMD_SYS_RESET: {
		struct hfi_cmd_sys_reset_pkt reset;

		memset(&reset, 0, sizeof(reset));
		reset.size = sizeof(struct hfi_cmd_sys_reset_pkt);
		reset.pkt_type = type;
		reset.user_data = (uint64_t)data;
		hfi_write_cmd(&reset);
	}
		break;
	case HFI_CMD_IPEBPS_CREATE_HANDLE: {
		struct hfi_cmd_create_handle handle;

		memset(&handle, 0, sizeof(handle));
		handle.size = sizeof(struct hfi_cmd_create_handle);
		handle.pkt_type = type;
		handle.handle_type = (uint32_t)data;
		handle.user_data1 = 0;
		hfi_write_cmd(&handle);
	}
		break;
	case HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT:
		break;
	default:
		CAM_ERR(CAM_HFI, "command not supported :%d", type);
		break;
	}
}
/*
 * hfi_get_hw_caps() - fill a query-capability command with the static
 * firmware/API/device version numbers this stack reports.
 * @query_buf: caller-provided struct cam_icp_query_cap_cmd.
 *
 * Return: 0 on success, -EINVAL when @query_buf is NULL.
 */
int hfi_get_hw_caps(void *query_buf)
{
	struct cam_icp_query_cap_cmd *caps;
	int dev;

	if (!query_buf) {
		CAM_ERR(CAM_HFI, "query buf is NULL");
		return -EINVAL;
	}

	caps = (struct cam_icp_query_cap_cmd *)query_buf;

	caps->fw_version.major = 0x12;
	caps->fw_version.minor = 0x12;
	caps->fw_version.revision = 0x12;

	caps->api_version.major = 0x13;
	caps->api_version.minor = 0x13;
	caps->api_version.revision = 0x13;

	caps->num_ipe = 2;
	caps->num_bps = 1;

	for (dev = 0; dev < CAM_ICP_DEV_TYPE_MAX; dev++) {
		caps->dev_ver[dev].dev_type = dev;
		caps->dev_ver[dev].hw_ver.major = 0x34 + dev;
		caps->dev_ver[dev].hw_ver.minor = 0x34 + dev;
		caps->dev_ver[dev].hw_ver.incr = 0x34 + dev;
	}

	return 0;
}
/*
 * cam_hfi_disable_cpu() - wait for the A5 to reach WFI, then disable
 * the core, request reset and mask its interrupts to the host.
 * @icp_base: ioremapped ICP CSR base.
 */
void cam_hfi_disable_cpu(void __iomem *icp_base)
{
	uint32_t data;
	uint32_t val;
	uint32_t try = 0;

	while (try < HFI_MAX_POLL_TRY) {
		data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
		CAM_DBG(CAM_HFI, "wfi status = %x\n", (int)data);
		if (data & ICP_CSR_A5_STATUS_WFI)
			break;
		/* Need to poll here to confirm that FW is going to trigger
		 * wfi and Host can then proceed. No interrupt is expected
		 * from FW at this time.
		 */
		msleep(100);
		try++;
	}

	/* Drop the enable and wake-up bits to stop the core */
	val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
	val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
	cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);

	/* NOTE(review): the reset register is read and the same value
	 * written straight back — presumably a write-to-latch register;
	 * confirm against the A5 CSR specification.
	 */
	val = cam_io_r(icp_base + HFI_REG_A5_CSR_NSEC_RESET);
	cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);

	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET,
		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
	cam_io_w_mb((uint32_t)INTR_DISABLE,
		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
}
/*
 * cam_hfi_enable_cpu() - enable the A5 core and release it from reset.
 * @icp_base: ioremapped ICP CSR base.
 *
 * NOTE(review): 0x10 written to NSEC_RESET is an undocumented magic
 * value here — presumably the de-assert bit for the core; confirm
 * against the A5 CSR specification.
 */
void cam_hfi_enable_cpu(void __iomem *icp_base)
{
	cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN,
			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
	cam_io_w_mb((uint32_t)0x10, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
}
/*
 * cam_hfi_resume() - bring the A5 back up after a power collapse,
 * wait for the firmware init handshake and re-program the shared
 * memory region registers.
 * @hfi_mem:  iova/kva map of the shared-memory regions.
 * @icp_base: ioremapped ICP CSR base.
 * @debug:    true to enable the debug control bit before start.
 *
 * Return: 0 on success, -EINVAL if the firmware handshake fails or
 * times out.
 */
int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
	void __iomem *icp_base, bool debug)
{
	int rc = 0;
	uint32_t data;
	uint32_t fw_version, status = 0;
	uint32_t retry_cnt = 0;

	cam_hfi_enable_cpu(icp_base);
	g_hfi->csr_base = icp_base;

	if (debug) {
		cam_io_w_mb(ICP_FLAG_A5_CTRL_DBG_EN,
			(icp_base + HFI_REG_A5_CSR_A5_CONTROL));

		/* Barrier needed as next write should be done after
		 * successful previous write. Next write enables clock
		 * gating
		 */
		wmb();

		cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
			icp_base + HFI_REG_A5_CSR_A5_CONTROL);

	} else {
		cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
	}

	while (retry_cnt < HFI_MAX_POLL_TRY) {
		/* NOTE(review): readw_poll_timeout()'s return value is
		 * ignored; the status register is re-read explicitly below
		 * instead.
		 */
		readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
			status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);

		CAM_DBG(CAM_HFI, "1: status = %u", status);
		status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
		CAM_DBG(CAM_HFI, "2: status = %u", status);

		if (status == ICP_INIT_RESP_SUCCESS)
			break;

		if (status == ICP_INIT_RESP_FAILED) {
			CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
				status);
			fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
			CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
			return -EINVAL;
		}
		retry_cnt++;
	}

	if ((retry_cnt == HFI_MAX_POLL_TRY) &&
		(status == ICP_INIT_RESP_RESET)) {
		CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
			status);
		fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
		CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
		return -EINVAL;
	}

	/* Re-enable firmware-to-host and watchdog interrupts */
	cam_io_w_mb((uint32_t)(INTR_ENABLE|INTR_ENABLE_WD0),
		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);

	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
	CAM_DBG(CAM_HFI, "fw version : [%x]", fw_version);

	data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
	CAM_DBG(CAM_HFI, "wfi status = %x", (int)data);

	/* Re-program the shared memory iovas/sizes for the firmware */
	cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
	cam_io_w_mb((uint32_t)hfi_mem->sfr_buf.iova,
		icp_base + HFI_REG_SFR_PTR);
	cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
		icp_base + HFI_REG_SHARED_MEM_PTR);
	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
		icp_base + HFI_REG_SHARED_MEM_SIZE);
	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
	cam_io_w_mb((uint32_t)hfi_mem->qdss.iova,
		icp_base + HFI_REG_QDSS_IOVA);
	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
		icp_base + HFI_REG_QDSS_IOVA_SIZE);

	return rc;
}
/*
 * cam_hfi_init() - allocate the global HFI state, lay out the queue
 * table in shared memory, program the ICP CSRs and wait for the
 * firmware init handshake.
 * @event_driven_mode: INTR_MODE, POLL_MODE or WM_MODE queue signalling.
 * @hfi_mem:           iova/kva map of the shared-memory regions.
 * @icp_base:          ioremapped base of the ICP CSR block.
 * @debug:             true to bring the A5 up with debug control bits.
 *
 * Return: 0 on success or a negative errno. On a failed handshake the
 * freshly created global state is torn down again.
 *
 * Fixes vs. original:
 *  - invalid-state path returned -EINVAL while still holding BOTH
 *    queue mutexes, deadlocking every later HFI call;
 *  - handshake-failure paths jumped to regions_fail with rc still 0,
 *    so the caller was told init succeeded while g_hfi had been freed.
 */
int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
		void __iomem *icp_base, bool debug)
{
	int rc = 0;
	struct hfi_qtbl *qtbl;
	struct hfi_qtbl_hdr *qtbl_hdr;
	struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr;
	uint32_t hw_version, soc_version, fw_version, status = 0;
	uint32_t retry_cnt = 0;
	struct sfr_buf *sfr_buffer;

	mutex_lock(&hfi_cmd_q_mutex);
	mutex_lock(&hfi_msg_q_mutex);

	if (!g_hfi) {
		g_hfi = kzalloc(sizeof(struct hfi_info), GFP_KERNEL);
		if (!g_hfi) {
			rc = -ENOMEM;
			goto alloc_fail;
		}
	}

	if (g_hfi->hfi_state != HFI_DEINIT) {
		CAM_ERR(CAM_HFI, "hfi_init: invalid state");
		/* Unlock via alloc_fail instead of returning with the
		 * mutexes held; the pre-existing g_hfi is kept.
		 */
		rc = -EINVAL;
		goto alloc_fail;
	}

	memcpy(&g_hfi->map, hfi_mem, sizeof(g_hfi->map));
	g_hfi->hfi_state = HFI_DEINIT;
	soc_version = socinfo_get_version();
	if (debug) {
		cam_io_w_mb(
		(uint32_t)(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN |
		ICP_CSR_EDBGRQ | ICP_CSR_DBGSWENABLE),
		icp_base + HFI_REG_A5_CSR_A5_CONTROL);
		msleep(100);
		cam_io_w_mb((uint32_t)(ICP_FLAG_CSR_A5_EN |
		ICP_FLAG_CSR_WAKE_UP_EN | ICP_CSR_EN_CLKGATE_WFI),
		icp_base + HFI_REG_A5_CSR_A5_CONTROL);
	} else {
		/* Due to hardware bug in V1 ICP clock gating has to be
		 * disabled, this is supposed to be fixed in V-2. But enabling
		 * the clock gating is causing the firmware hang, hence
		 * disabling the clock gating on both V1 and V2 until the
		 * hardware team root causes this
		 */
		cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN |
			ICP_FLAG_CSR_WAKE_UP_EN |
			ICP_CSR_EN_CLKGATE_WFI,
			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
	}

	/* Build the queue table header in shared memory */
	qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
	qtbl_hdr = &qtbl->q_tbl_hdr;
	qtbl_hdr->qtbl_version = 0xFFFFFFFF;
	qtbl_hdr->qtbl_size = sizeof(struct hfi_qtbl);
	qtbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_qtbl_hdr);
	qtbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_q_hdr);
	qtbl_hdr->qtbl_num_q = ICP_HFI_NUMBER_OF_QS;
	qtbl_hdr->qtbl_num_active_q = ICP_HFI_NUMBER_OF_QS;

	/* setup host-to-firmware command queue */
	cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
	cmd_q_hdr->qhdr_status = QHDR_ACTIVE;
	cmd_q_hdr->qhdr_start_addr = hfi_mem->cmd_q.iova;
	cmd_q_hdr->qhdr_q_size = ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
	cmd_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
	cmd_q_hdr->qhdr_pkt_drop_cnt = RESET;
	cmd_q_hdr->qhdr_read_idx = RESET;
	cmd_q_hdr->qhdr_write_idx = RESET;

	/* setup firmware-to-Host message queue */
	msg_q_hdr = &qtbl->q_hdr[Q_MSG];
	msg_q_hdr->qhdr_status = QHDR_ACTIVE;
	msg_q_hdr->qhdr_start_addr = hfi_mem->msg_q.iova;
	msg_q_hdr->qhdr_q_size = ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
	msg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
	msg_q_hdr->qhdr_pkt_drop_cnt = RESET;
	msg_q_hdr->qhdr_read_idx = RESET;
	msg_q_hdr->qhdr_write_idx = RESET;

	/* setup firmware-to-Host debug queue */
	dbg_q_hdr = &qtbl->q_hdr[Q_DBG];
	dbg_q_hdr->qhdr_status = QHDR_ACTIVE;
	dbg_q_hdr->qhdr_start_addr = hfi_mem->dbg_q.iova;
	dbg_q_hdr->qhdr_q_size = ICP_DBG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
	dbg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
	dbg_q_hdr->qhdr_pkt_drop_cnt = RESET;
	dbg_q_hdr->qhdr_read_idx = RESET;
	dbg_q_hdr->qhdr_write_idx = RESET;

	sfr_buffer = (struct sfr_buf *)hfi_mem->sfr_buf.kva;
	sfr_buffer->size = ICP_MSG_SFR_SIZE_IN_BYTES;

	/* Configure per-queue rx/tx signalling for the chosen mode */
	switch (event_driven_mode) {
	case INTR_MODE:
		cmd_q_hdr->qhdr_type = Q_CMD;
		cmd_q_hdr->qhdr_rx_wm = SET;
		cmd_q_hdr->qhdr_tx_wm = SET;
		cmd_q_hdr->qhdr_rx_req = SET;
		cmd_q_hdr->qhdr_tx_req = RESET;
		cmd_q_hdr->qhdr_rx_irq_status = RESET;
		cmd_q_hdr->qhdr_tx_irq_status = RESET;

		msg_q_hdr->qhdr_type = Q_MSG;
		msg_q_hdr->qhdr_rx_wm = SET;
		msg_q_hdr->qhdr_tx_wm = SET;
		msg_q_hdr->qhdr_rx_req = SET;
		msg_q_hdr->qhdr_tx_req = RESET;
		msg_q_hdr->qhdr_rx_irq_status = RESET;
		msg_q_hdr->qhdr_tx_irq_status = RESET;

		dbg_q_hdr->qhdr_type = Q_DBG;
		dbg_q_hdr->qhdr_rx_wm = SET;
		dbg_q_hdr->qhdr_tx_wm = SET_WM;
		dbg_q_hdr->qhdr_rx_req = RESET;
		dbg_q_hdr->qhdr_tx_req = RESET;
		dbg_q_hdr->qhdr_rx_irq_status = RESET;
		dbg_q_hdr->qhdr_tx_irq_status = RESET;

		break;
	case POLL_MODE:
		cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_POLL_MODE_2 |
			RX_EVENT_POLL_MODE_2;
		msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_POLL_MODE_2 |
			RX_EVENT_POLL_MODE_2;
		dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_POLL_MODE_2 |
			RX_EVENT_POLL_MODE_2;
		break;
	case WM_MODE:
		cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_DRIVEN_MODE_2 |
			RX_EVENT_DRIVEN_MODE_2;
		cmd_q_hdr->qhdr_rx_wm = SET;
		cmd_q_hdr->qhdr_tx_wm = SET;
		cmd_q_hdr->qhdr_rx_req = RESET;
		cmd_q_hdr->qhdr_tx_req = SET;
		cmd_q_hdr->qhdr_rx_irq_status = RESET;
		cmd_q_hdr->qhdr_tx_irq_status = RESET;

		msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_DRIVEN_MODE_2 |
			RX_EVENT_DRIVEN_MODE_2;
		msg_q_hdr->qhdr_rx_wm = SET;
		msg_q_hdr->qhdr_tx_wm = SET;
		msg_q_hdr->qhdr_rx_req = SET;
		msg_q_hdr->qhdr_tx_req = RESET;
		msg_q_hdr->qhdr_rx_irq_status = RESET;
		msg_q_hdr->qhdr_tx_irq_status = RESET;

		dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_DRIVEN_MODE_2 |
			RX_EVENT_DRIVEN_MODE_2;
		dbg_q_hdr->qhdr_rx_wm = SET;
		dbg_q_hdr->qhdr_tx_wm = SET_WM;
		dbg_q_hdr->qhdr_rx_req = RESET;
		dbg_q_hdr->qhdr_tx_req = RESET;
		dbg_q_hdr->qhdr_rx_irq_status = RESET;
		dbg_q_hdr->qhdr_tx_irq_status = RESET;
		break;
	default:
		CAM_ERR(CAM_HFI, "Invalid event driven mode :%u",
			event_driven_mode);
		break;
	}

	/* Publish the shared-memory layout to the firmware and kick init */
	cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova,
		icp_base + HFI_REG_QTBL_PTR);
	cam_io_w_mb((uint32_t)hfi_mem->sfr_buf.iova,
		icp_base + HFI_REG_SFR_PTR);
	cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
		icp_base + HFI_REG_SHARED_MEM_PTR);
	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
		icp_base + HFI_REG_SHARED_MEM_SIZE);
	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_SET,
		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
	cam_io_w_mb((uint32_t)hfi_mem->qdss.iova,
		icp_base + HFI_REG_QDSS_IOVA);
	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
		icp_base + HFI_REG_QDSS_IOVA_SIZE);

	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);

	while (retry_cnt < HFI_MAX_POLL_TRY) {
		readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
			status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);

		CAM_DBG(CAM_HFI, "1: status = %u rc = %d", status, rc);
		status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
		CAM_DBG(CAM_HFI, "2: status = %u rc = %d", status, rc);

		if (status == ICP_INIT_RESP_SUCCESS)
			break;

		if (status == ICP_INIT_RESP_FAILED) {
			CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
				status);
			fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
			CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
			rc = -EINVAL;
			goto regions_fail;
		}
		retry_cnt++;
	}

	if ((retry_cnt == HFI_MAX_POLL_TRY) &&
		(status == ICP_INIT_RESP_RESET)) {
		CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
			status);
		fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
		CAM_ERR(CAM_HFI,
			"hw version : : [%x], fw version : [%x]",
			hw_version, fw_version);
		rc = -EINVAL;
		goto regions_fail;
	}

	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
	CAM_DBG(CAM_HFI, "hw version : : [%x], fw version : [%x]",
		hw_version, fw_version);

	g_hfi->csr_base = icp_base;
	g_hfi->hfi_state = HFI_READY;
	g_hfi->cmd_q_state = true;
	g_hfi->msg_q_state = true;
	/* Enable firmware-to-host and watchdog interrupts */
	cam_io_w_mb((uint32_t)(INTR_ENABLE|INTR_ENABLE_WD0),
		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);

	mutex_unlock(&hfi_cmd_q_mutex);
	mutex_unlock(&hfi_msg_q_mutex);

	return rc;
regions_fail:
	kfree(g_hfi);
	g_hfi = NULL;
alloc_fail:
	mutex_unlock(&hfi_cmd_q_mutex);
	mutex_unlock(&hfi_msg_q_mutex);
	return rc;
}
/*
 * cam_hfi_deinit() - tear down the global HFI state under both queue
 * mutexes.
 * @icp_base: unused here; kept for interface symmetry with init.
 */
void cam_hfi_deinit(void __iomem *icp_base)
{
	mutex_lock(&hfi_cmd_q_mutex);
	mutex_lock(&hfi_msg_q_mutex);

	if (g_hfi) {
		/* Mark both queues dead before freeing the backing state */
		g_hfi->cmd_q_state = false;
		g_hfi->msg_q_state = false;
		kzfree(g_hfi);
		g_hfi = NULL;
	} else {
		CAM_ERR(CAM_HFI, "hfi path not established yet");
	}

	mutex_unlock(&hfi_cmd_q_mutex);
	mutex_unlock(&hfi_msg_q_mutex);
}

View file

@ -0,0 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw_mgr/ a5_hw/ ipe_hw/ bps_hw/

View file

@ -0,0 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += a5_dev.o a5_core.o a5_soc.o

View file

@ -0,0 +1,476 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/elf.h>
#include <media/cam_icp.h>
#include "cam_io_util.h"
#include "cam_a5_hw_intf.h"
#include "cam_hw.h"
#include "cam_hw_intf.h"
#include "a5_core.h"
#include "a5_soc.h"
#include "cam_soc_util.h"
#include "cam_io_util.h"
#include "hfi_intf.h"
#include "hfi_sys_defs.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
#include "cam_debug_util.h"
/*
 * cam_a5_cpas_vote() - apply the valid AHB and/or AXI bandwidth votes
 * for the A5 via CPAS.
 * @core_info: A5 core state holding the CPAS client handle.
 * @cpas_vote: votes to apply; only the *_valid ones are submitted.
 *
 * Return: 0 on success or the first CPAS error. (Fix: the original
 * let a successful AXI vote overwrite the rc of a failed AHB vote.)
 */
static int cam_a5_cpas_vote(struct cam_a5_device_core_info *core_info,
	struct cam_icp_cpas_vote *cpas_vote)
{
	int rc;

	if (cpas_vote->ahb_vote_valid) {
		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
			&cpas_vote->ahb_vote);
		if (rc) {
			CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
			return rc;
		}
	}

	if (cpas_vote->axi_vote_valid) {
		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
			&cpas_vote->axi_vote);
		if (rc) {
			CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
			return rc;
		}
	}

	return 0;
}
/*
 * cam_icp_validate_fw() - sanity-check the ICP firmware image header.
 * @elf: start of the firmware blob.
 *
 * Verifies the ELF magic, the ARM machine type and the 32-bit class.
 * Return: 0 when the image looks valid, -EINVAL otherwise.
 */
static int32_t cam_icp_validate_fw(const uint8_t *elf)
{
	struct elf32_hdr *hdr;

	if (!elf) {
		CAM_ERR(CAM_ICP, "Invalid params");
		return -EINVAL;
	}

	hdr = (struct elf32_hdr *)elf;

	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0) {
		CAM_ERR(CAM_ICP, "ICP elf identifier is failed");
		return -EINVAL;
	}

	/* check architecture */
	if (hdr->e_machine != EM_ARM) {
		CAM_ERR(CAM_ICP, "unsupported arch");
		return -EINVAL;
	}

	/* check elf bit format */
	if (hdr->e_ident[EI_CLASS] != ELFCLASS32) {
		CAM_ERR(CAM_ICP, "elf doesn't support 32 bit format");
		return -EINVAL;
	}

	return 0;
}
/*
 * cam_icp_get_fw_size() - compute the memory footprint needed to load
 * the firmware: the largest align-rounded end address (p_vaddr +
 * aligned p_memsz) over all loadable program segments.
 * @elf:     start of the firmware blob (already validated).
 * @fw_size: out parameter, required buffer size in bytes.
 *
 * Return: 0 on success, -EINVAL for bad args or an image with no
 * loadable segments.
 */
static int32_t cam_icp_get_fw_size(const uint8_t *elf, uint32_t *fw_size)
{
	int32_t rc = 0;
	int32_t i = 0;
	uint32_t num_prg_hdrs;
	unsigned char *icp_prg_hdr_tbl;
	uint32_t seg_mem_size = 0;
	struct elf32_hdr *elf_hdr;
	struct elf32_phdr *prg_hdr;

	if (!elf || !fw_size) {
		CAM_ERR(CAM_ICP, "invalid args");
		return -EINVAL;
	}

	*fw_size = 0;

	elf_hdr = (struct elf32_hdr *)elf;
	num_prg_hdrs = elf_hdr->e_phnum;
	icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff;
	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];

	/* NOTE(review): this check is dead code — the address of an
	 * array element is never NULL; kept to avoid behavior change.
	 */
	if (!prg_hdr) {
		CAM_ERR(CAM_ICP, "failed to get elf program header attr");
		return -EINVAL;
	}

	CAM_DBG(CAM_ICP, "num_prg_hdrs = %d", num_prg_hdrs);
	for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
		/* p_flags == 0 marks a segment that is not loaded */
		if (prg_hdr->p_flags == 0)
			continue;

		/* Round the segment size up to its alignment */
		seg_mem_size = (prg_hdr->p_memsz + prg_hdr->p_align - 1) &
					~(prg_hdr->p_align - 1);
		seg_mem_size += prg_hdr->p_vaddr;
		CAM_DBG(CAM_ICP, "memsz:%x align:%x addr:%x seg_mem_size:%x",
			(int)prg_hdr->p_memsz, (int)prg_hdr->p_align,
			(int)prg_hdr->p_vaddr, (int)seg_mem_size);
		if (*fw_size < seg_mem_size)
			*fw_size = seg_mem_size;

	}

	if (*fw_size == 0) {
		CAM_ERR(CAM_ICP, "invalid elf fw file");
		return -EINVAL;
	}

	return rc;
}
/*
 * cam_icp_program_fw() - copy every loadable ELF segment into the
 * preallocated (io-mapped) firmware buffer at its p_vaddr offset.
 * @elf:       start of the firmware blob (already validated and sized).
 * @core_info: A5 core state holding fw_kva_addr, the destination base.
 *
 * Return: 0 on success, -EINVAL if the program header table is
 * unavailable.
 */
static int32_t cam_icp_program_fw(const uint8_t *elf,
		struct cam_a5_device_core_info *core_info)
{
	int32_t rc = 0;
	uint32_t num_prg_hdrs;
	unsigned char *icp_prg_hdr_tbl;
	int32_t i = 0;
	u8 *dest;
	u8 *src;
	struct elf32_hdr *elf_hdr;
	struct elf32_phdr *prg_hdr;

	elf_hdr = (struct elf32_hdr *)elf;
	num_prg_hdrs = elf_hdr->e_phnum;
	icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff;
	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];

	/* NOTE(review): dead check — &array[0] is never NULL; kept to
	 * avoid behavior change.
	 */
	if (!prg_hdr) {
		CAM_ERR(CAM_ICP, "failed to get elf program header attr");
		return -EINVAL;
	}

	for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
		/* p_flags == 0 marks a segment that is not loaded */
		if (prg_hdr->p_flags == 0)
			continue;

		CAM_DBG(CAM_ICP, "Loading FW header size: %u",
			prg_hdr->p_filesz);
		if (prg_hdr->p_filesz != 0) {
			src = (u8 *)((u8 *)elf + prg_hdr->p_offset);
			dest = (u8 *)(((u8 *)core_info->fw_kva_addr) +
						prg_hdr->p_vaddr);

			/* destination is device/io memory */
			memcpy_toio(dest, src, prg_hdr->p_filesz);
		}
	}

	return rc;
}
/*
 * cam_a5_download_fw() - fetch "CAMERA_ICP.elf" via the kernel
 * firmware loader, validate it and copy its loadable segments into
 * the preallocated firmware buffer.
 * @device_priv: struct cam_hw_info of the A5 device.
 *
 * Return: 0 on success or a negative errno. The firmware blob is
 * released on every path once requested.
 *
 * Fix: dropped the unused hw_info and cam_a5_soc_info locals.
 */
static int32_t cam_a5_download_fw(void *device_priv)
{
	int32_t rc = 0;
	uint32_t fw_size;
	const uint8_t *fw_start = NULL;
	struct cam_hw_info *a5_dev = device_priv;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_a5_device_core_info *core_info = NULL;
	struct platform_device *pdev = NULL;

	if (!device_priv) {
		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
		return -EINVAL;
	}

	soc_info = &a5_dev->soc_info;
	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
	pdev = soc_info->pdev;

	rc = request_firmware(&core_info->fw_elf, "CAMERA_ICP.elf", &pdev->dev);
	if (rc) {
		CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc);
		return rc;
	}

	if (!core_info->fw_elf) {
		CAM_ERR(CAM_ICP, "Invalid elf size");
		rc = -EINVAL;
		goto fw_download_failed;
	}

	fw_start = core_info->fw_elf->data;
	rc = cam_icp_validate_fw(fw_start);
	if (rc) {
		CAM_ERR(CAM_ICP, "fw elf validation failed");
		goto fw_download_failed;
	}

	rc = cam_icp_get_fw_size(fw_start, &fw_size);
	if (rc) {
		CAM_ERR(CAM_ICP, "unable to get fw size");
		goto fw_download_failed;
	}

	/* The destination buffer was sized at probe; refuse to overrun */
	if (core_info->fw_buf_len < fw_size) {
		CAM_ERR(CAM_ICP, "mismatch in fw size: %u %llu",
			fw_size, core_info->fw_buf_len);
		rc = -EINVAL;
		goto fw_download_failed;
	}

	rc = cam_icp_program_fw(fw_start, core_info);
	if (rc) {
		CAM_ERR(CAM_ICP, "fw program is failed");
		goto fw_download_failed;
	}

fw_download_failed:
	release_firmware(core_info->fw_elf);
	return rc;
}
/*
 * cam_a5_init_hw() - power up the A5: start the CPAS client with a
 * default SVS/bandwidth vote, then enable the SOC resources.
 * @device_priv:  struct cam_hw_info of the A5 device.
 * @init_hw_args: unused.
 * @arg_size:     unused.
 *
 * Return: 0 on success or a negative errno; CPAS is stopped again if
 * enabling the SOC resources fails.
 */
int cam_a5_init_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size)
{
	struct cam_hw_info *a5_dev = device_priv;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_a5_device_core_info *core_info = NULL;
	struct cam_icp_cpas_vote cpas_vote;
	int rc = 0;

	if (!device_priv) {
		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
		return -EINVAL;
	}

	soc_info = &a5_dev->soc_info;
	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
	if ((!soc_info) || (!core_info)) {
		CAM_ERR(CAM_ICP, "soc_info: %pK core_info: %pK",
			soc_info, core_info);
		return -EINVAL;
	}

	/* Fix: zero the vote first so no uninitialized stack fields are
	 * handed to cam_cpas_start().
	 */
	memset(&cpas_vote, 0, sizeof(cpas_vote));
	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
	cpas_vote.axi_vote.compressed_bw = CAM_ICP_A5_BW_BYTES_VOTE;
	cpas_vote.axi_vote.uncompressed_bw = CAM_ICP_A5_BW_BYTES_VOTE;

	rc = cam_cpas_start(core_info->cpas_handle,
		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
	if (rc) {
		CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
		return rc;
	}
	core_info->cpas_start = true;

	rc = cam_a5_enable_soc_resources(soc_info);
	if (rc) {
		CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc);
		/* Roll back the CPAS start on failure */
		if (cam_cpas_stop(core_info->cpas_handle))
			CAM_ERR(CAM_ICP, "cpas stop is failed");
		else
			core_info->cpas_start = false;
	}

	return rc;
}
/*
 * cam_a5_deinit_hw() - power down the A5: disable the SOC resources,
 * then stop the CPAS client if it was started.
 * @device_priv:  struct cam_hw_info of the A5 device.
 * @init_hw_args: unused.
 * @arg_size:     unused.
 *
 * Return: 0 on success or the SOC-disable error (CPAS stop failures
 * are logged but not propagated, as in init).
 */
int cam_a5_deinit_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size)
{
	struct cam_hw_info *a5_dev = device_priv;
	struct cam_hw_soc_info *soc;
	struct cam_a5_device_core_info *core;
	int rc;

	if (!device_priv) {
		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
		return -EINVAL;
	}

	soc = &a5_dev->soc_info;
	core = (struct cam_a5_device_core_info *)a5_dev->core_info;
	if (!soc || !core) {
		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
			soc, core);
		return -EINVAL;
	}

	rc = cam_a5_disable_soc_resources(soc);
	if (rc)
		CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);

	if (core->cpas_start) {
		if (cam_cpas_stop(core->cpas_handle) == 0)
			core->cpas_start = false;
		else
			CAM_ERR(CAM_ICP, "cpas stop is failed");
	}

	return rc;
}
/*
 * cam_a5_irq() - A5 interrupt handler: read and acknowledge the host
 * interrupt status, then forward it to the ICP HW manager callback.
 * @irq_num: interrupt number (unused).
 * @data:    struct cam_hw_info of the A5 device.
 *
 * Return: IRQ_HANDLED always, including for a NULL @data.
 */
irqreturn_t cam_a5_irq(int irq_num, void *data)
{
	struct cam_hw_info *a5_dev = data;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_a5_device_core_info *core_info = NULL;
	struct cam_a5_device_hw_info *hw_info = NULL;
	uint32_t irq_status = 0;

	if (!data) {
		CAM_ERR(CAM_ICP, "Invalid cam_dev_info or query_cap args");
		return IRQ_HANDLED;
	}

	soc_info = &a5_dev->soc_info;
	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
	hw_info = core_info->a5_hw_info;

	irq_status = cam_io_r_mb(soc_info->reg_map[A5_SIERRA_BASE].mem_base +
				core_info->a5_hw_info->a5_host_int_status);

	/* Ack by writing the status back to the clear register */
	cam_io_w_mb(irq_status,
			soc_info->reg_map[A5_SIERRA_BASE].mem_base +
			core_info->a5_hw_info->a5_host_int_clr);

	if ((irq_status & A5_WDT_0) ||
		(irq_status & A5_WDT_1)) {
		CAM_ERR_RATE_LIMIT(CAM_ICP, "watch dog interrupt from A5");
	}

	/* Callback registered via CAM_ICP_A5_SET_IRQ_CB; the same lock
	 * protects registration and invocation
	 */
	spin_lock(&a5_dev->hw_lock);
	if (core_info->irq_cb.icp_hw_mgr_cb)
		core_info->irq_cb.icp_hw_mgr_cb(irq_status,
					core_info->irq_cb.data);
	spin_unlock(&a5_dev->hw_lock);

	return IRQ_HANDLED;
}
/*
 * cam_a5_process_cmd() - dispatch a CAM_ICP_A5_* control command to
 * the A5 device: firmware download, buffer/IRQ-callback registration,
 * HFI system commands, CPAS votes and UBWC configuration.
 * @device_priv: struct cam_hw_info of the A5 device.
 * @cmd_type:    one of the CAM_ICP_A5_* command codes.
 * @cmd_args:    command-specific argument struct (may be unused).
 * @arg_size:    size of @cmd_args (not validated here).
 *
 * Return: 0 on success or a negative errno; unknown-but-in-range
 * commands fall through to the default case and return 0.
 */
int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
	void *cmd_args, uint32_t arg_size)
{
	struct cam_hw_info *a5_dev = device_priv;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_a5_device_core_info *core_info = NULL;
	struct cam_a5_device_hw_info *hw_info = NULL;
	struct a5_soc_info *a5_soc = NULL;
	unsigned long flags;
	int rc = 0;

	if (!device_priv) {
		CAM_ERR(CAM_ICP, "Invalid arguments");
		return -EINVAL;
	}

	if (cmd_type >= CAM_ICP_A5_CMD_MAX) {
		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
		return -EINVAL;
	}

	soc_info = &a5_dev->soc_info;
	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
	hw_info = core_info->a5_hw_info;

	switch (cmd_type) {
	case CAM_ICP_A5_CMD_FW_DOWNLOAD:
		rc = cam_a5_download_fw(device_priv);

		break;
	case CAM_ICP_A5_CMD_SET_FW_BUF: {
		struct cam_icp_a5_set_fw_buf_info *fw_buf_info = cmd_args;

		if (!cmd_args) {
			CAM_ERR(CAM_ICP, "cmd args NULL");
			return -EINVAL;
		}

		/* Cache the firmware buffer mapping for later download */
		core_info->fw_buf = fw_buf_info->iova;
		core_info->fw_kva_addr = fw_buf_info->kva;
		core_info->fw_buf_len = fw_buf_info->len;

		CAM_DBG(CAM_ICP, "fw buf info = %x %llx %lld",
			core_info->fw_buf, core_info->fw_kva_addr,
			core_info->fw_buf_len);
		break;
	}
	case CAM_ICP_A5_SET_IRQ_CB: {
		struct cam_icp_a5_set_irq_cb *irq_cb = cmd_args;

		if (!cmd_args) {
			CAM_ERR(CAM_ICP, "cmd args NULL");
			return -EINVAL;
		}

		/* Registration is serialized against cam_a5_irq() by
		 * the same hw_lock
		 */
		spin_lock_irqsave(&a5_dev->hw_lock, flags);
		core_info->irq_cb.icp_hw_mgr_cb = irq_cb->icp_hw_mgr_cb;
		core_info->irq_cb.data = irq_cb->data;
		spin_unlock_irqrestore(&a5_dev->hw_lock, flags);
		break;
	}

	case CAM_ICP_A5_SEND_INIT:
		hfi_send_system_cmd(HFI_CMD_SYS_INIT, 0, 0);
		break;

	case CAM_ICP_A5_CMD_PC_PREP:
		hfi_send_system_cmd(HFI_CMD_SYS_PC_PREP, 0, 0);
		break;

	case CAM_ICP_A5_CMD_VOTE_CPAS: {
		struct cam_icp_cpas_vote *cpas_vote = cmd_args;

		if (!cmd_args) {
			CAM_ERR(CAM_ICP, "cmd args NULL");
			return -EINVAL;
		}

		/* NOTE(review): the vote result is deliberately not
		 * propagated here; confirm callers do not rely on it.
		 */
		cam_a5_cpas_vote(core_info, cpas_vote);
		break;
	}

	case CAM_ICP_A5_CMD_CPAS_START: {
		struct cam_icp_cpas_vote *cpas_vote = cmd_args;

		if (!cmd_args) {
			CAM_ERR(CAM_ICP, "cmd args NULL");
			return -EINVAL;
		}

		if (!core_info->cpas_start) {
			rc = cam_cpas_start(core_info->cpas_handle,
					&cpas_vote->ahb_vote,
					&cpas_vote->axi_vote);
			core_info->cpas_start = true;
		}
		break;
	}

	case CAM_ICP_A5_CMD_CPAS_STOP:
		if (core_info->cpas_start) {
			cam_cpas_stop(core_info->cpas_handle);
			core_info->cpas_start = false;
		}
		break;
	case CAM_ICP_A5_CMD_UBWC_CFG:
		a5_soc = soc_info->soc_private;
		if (!a5_soc) {
			CAM_ERR(CAM_ICP, "A5 private soc info is NULL");
			return -EINVAL;
		}
		rc = hfi_cmd_ubwc_config(a5_soc->ubwc_cfg);
		break;
	default:
		break;
	}

	return rc;
}

View file

@ -0,0 +1,82 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */
#ifndef CAM_A5_CORE_H
#define CAM_A5_CORE_H
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-buf.h>
#include "cam_a5_hw_intf.h"
/* Indices into cam_hw_soc_info reg_map[] for the A5 register blocks */
#define A5_QGIC_BASE 0
#define A5_SIERRA_BASE 1
#define A5_CSR_BASE 2
/* Bits of the A5 host interrupt status/clear registers */
#define A5_HOST_INT 0x1
#define A5_WDT_0 0x2
#define A5_WDT_1 0x4
/* Guard region reserved after the firmware ELF image (2 MiB) */
#define ELF_GUARD_PAGE (2 * 1024 * 1024)
/*
 * Register offsets of the A5 control/status block. Values are filled in
 * from the of_device_id match data (see cam_a5_hw_info in a5_dev.c).
 */
struct cam_a5_device_hw_info {
	uint32_t hw_ver;
	uint32_t nsec_reset;
	uint32_t a5_control;
	uint32_t a5_host_int_en;
	uint32_t a5_host_int;
	uint32_t a5_host_int_clr;
	uint32_t a5_host_int_status;
	uint32_t a5_host_int_set;
	uint32_t host_a5_int;
	uint32_t fw_version;
	uint32_t init_req;
	uint32_t init_response;
	uint32_t shared_mem_ptr;
	uint32_t shared_mem_size;
	uint32_t qtbl_ptr;
	uint32_t uncached_heap_ptr;
	uint32_t uncached_heap_size;
	uint32_t a5_status;
};
/**
 * struct cam_a5_device_core_info - per-device runtime state of the A5 core
 * @a5_hw_info: A5 hardware info (register offsets)
 * @fw_elf: start address of fw start with elf header
 * @fw: start address of fw blob
 * @fw_buf: smmu alloc/mapped fw buffer (device IOVA)
 * @fw_kva_addr: kernel virtual address of the mapped fw buffer
 * @fw_buf_len: fw buffer length
 * @query_cap: A5 query info from firmware
 * @a5_acquire: Acquire information of A5
 * @irq_cb: IRQ callback
 * @cpas_handle: CPAS handle for A5
 * @cpas_start: state variable for cpas
 *
 * (Comment previously mis-titled "cam_a5_device_hw_info" and misspelled
 * @cpas_start as @cpast_start; corrected.)
 */
struct cam_a5_device_core_info {
	struct cam_a5_device_hw_info *a5_hw_info;
	const struct firmware *fw_elf;
	void *fw;
	uint32_t fw_buf;
	uintptr_t fw_kva_addr;
	uint64_t fw_buf_len;
	struct cam_icp_a5_query_cap query_cap;
	struct cam_icp_a5_acquire_dev a5_acquire[8];
	struct cam_icp_a5_set_irq_cb irq_cb;
	uint32_t cpas_handle;
	bool cpas_start;
};
/* HW-interface ops registered on the cam_hw_intf at probe time */
int cam_a5_init_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size);
int cam_a5_deinit_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size);
int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
	void *cmd_args, uint32_t arg_size);
irqreturn_t cam_a5_irq(int irq_num, void *data);
#endif /* CAM_A5_CORE_H */

View file

@ -0,0 +1,228 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/timer.h>
#include "a5_core.h"
#include "a5_soc.h"
#include "cam_io_util.h"
#include "cam_hw.h"
#include "cam_hw_intf.h"
#include "cam_a5_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
#include "cam_debug_util.h"
/* A5 SOC private data (fw name, UBWC config), hung off soc_info->soc_private
 * at probe time and filled by cam_a5_get_dt_properties().
 */
struct a5_soc_info cam_a5_soc_info;
EXPORT_SYMBOL(cam_a5_soc_info);
/* Register offsets of the A5 CSR block; attached to the device via the
 * of_device_id match data in cam_a5_dt_match.
 */
struct cam_a5_device_hw_info cam_a5_hw_info = {
	.hw_ver = 0x0,
	.nsec_reset = 0x4,
	.a5_control = 0x8,
	.a5_host_int_en = 0x10,
	.a5_host_int = 0x14,
	.a5_host_int_clr = 0x18,
	.a5_host_int_status = 0x1c,
	.a5_host_int_set = 0x20,
	.host_a5_int = 0x30,
	.fw_version = 0x44,
	.init_req = 0x48,
	.init_response = 0x4c,
	.shared_mem_ptr = 0x50,
	.shared_mem_size = 0x54,
	.qtbl_ptr = 0x58,
	.uncached_heap_ptr = 0x5c,
	.uncached_heap_size = 0x60,
	.a5_status = 0x200,
};
EXPORT_SYMBOL(cam_a5_hw_info);
/*
 * CPAS IRQ callback for the A5 client: logs (rate-limited) UBWC
 * encode/decode errors raised by CAMNOC and reports whether the
 * interrupt type was recognized and handled.
 */
static bool cam_a5_cpas_cb(uint32_t client_handle, void *userdata,
	struct cam_cpas_irq_data *irq_data)
{
	bool handled = false;

	if (!irq_data)
		return false;

	if (irq_data->irq_type == CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR) {
		CAM_ERR_RATE_LIMIT(CAM_ICP,
			"IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
			irq_data->irq_type,
			irq_data->u.dec_err.decerr_status.value,
			irq_data->u.dec_err.decerr_status.thr_err,
			irq_data->u.dec_err.decerr_status.fcl_err,
			irq_data->u.dec_err.decerr_status.len_md_err,
			irq_data->u.dec_err.decerr_status.format_err);
		handled = true;
	} else if (irq_data->irq_type ==
		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR) {
		CAM_ERR_RATE_LIMIT(CAM_ICP,
			"IPE/BPS UBWC Encode error type=%d status=%x",
			irq_data->irq_type,
			irq_data->u.enc_err.encerr_status.value);
		handled = true;
	}

	return handled;
}
int cam_a5_register_cpas(struct cam_hw_soc_info *soc_info,
struct cam_a5_device_core_info *core_info,
uint32_t hw_idx)
{
struct cam_cpas_register_params cpas_register_params;
int rc;
cpas_register_params.dev = &soc_info->pdev->dev;
memcpy(cpas_register_params.identifier, "icp", sizeof("icp"));
cpas_register_params.cam_cpas_client_cb = cam_a5_cpas_cb;
cpas_register_params.cell_index = hw_idx;
cpas_register_params.userdata = NULL;
rc = cam_cpas_register_client(&cpas_register_params);
if (rc < 0) {
CAM_ERR(CAM_ICP, "failed: %d", rc);
return rc;
}
core_info->cpas_handle = cpas_register_params.client_handle;
return rc;
}
/**
 * cam_a5_probe() - platform probe for the ICP A5 device
 * @pdev: matched platform device
 *
 * Allocates the cam_hw_intf / cam_hw_info / core_info triple, wires up the
 * hw ops, initializes SOC resources (DT properties, memory, IRQ) and
 * registers with CPAS. On any failure the goto ladder frees everything
 * allocated so far, innermost first.
 *
 * NOTE(review): the cpas_reg_failed path does not release the platform
 * resources requested by cam_a5_init_soc_resources() — verify whether a
 * release call is needed there.
 *
 * Return: 0 on success, negative errno on failure.
 */
int cam_a5_probe(struct platform_device *pdev)
{
	int rc = 0;
	struct cam_hw_info *a5_dev = NULL;
	struct cam_hw_intf *a5_dev_intf = NULL;
	const struct of_device_id *match_dev = NULL;
	struct cam_a5_device_core_info *core_info = NULL;
	struct cam_a5_device_hw_info *hw_info = NULL;
	a5_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
	if (!a5_dev_intf)
		return -ENOMEM;
	/* hw_idx stays 0 if "cell-index" is absent (return value ignored) */
	of_property_read_u32(pdev->dev.of_node,
		"cell-index", &a5_dev_intf->hw_idx);
	a5_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
	if (!a5_dev) {
		rc = -ENOMEM;
		goto a5_dev_alloc_failure;
	}
	a5_dev->soc_info.pdev = pdev;
	a5_dev->soc_info.dev = &pdev->dev;
	a5_dev->soc_info.dev_name = pdev->name;
	a5_dev_intf->hw_priv = a5_dev;
	a5_dev_intf->hw_ops.init = cam_a5_init_hw;
	a5_dev_intf->hw_ops.deinit = cam_a5_deinit_hw;
	a5_dev_intf->hw_ops.process_cmd = cam_a5_process_cmd;
	a5_dev_intf->hw_type = CAM_ICP_DEV_A5;
	CAM_DBG(CAM_ICP, "type %d index %d",
		a5_dev_intf->hw_type,
		a5_dev_intf->hw_idx);
	platform_set_drvdata(pdev, a5_dev_intf);
	a5_dev->core_info = kzalloc(sizeof(struct cam_a5_device_core_info),
		GFP_KERNEL);
	if (!a5_dev->core_info) {
		rc = -ENOMEM;
		goto core_info_alloc_failure;
	}
	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
	match_dev = of_match_device(pdev->dev.driver->of_match_table,
		&pdev->dev);
	if (!match_dev) {
		CAM_ERR(CAM_ICP, "No a5 hardware info");
		rc = -EINVAL;
		goto match_err;
	}
	/* Register offsets come from the of_device_id match data */
	hw_info = (struct cam_a5_device_hw_info *)match_dev->data;
	core_info->a5_hw_info = hw_info;
	a5_dev->soc_info.soc_private = &cam_a5_soc_info;
	rc = cam_a5_init_soc_resources(&a5_dev->soc_info, cam_a5_irq,
		a5_dev);
	if (rc < 0) {
		CAM_ERR(CAM_ICP, "failed to init_soc");
		goto init_soc_failure;
	}
	CAM_DBG(CAM_ICP, "soc info : %pK",
		(void *)&a5_dev->soc_info);
	rc = cam_a5_register_cpas(&a5_dev->soc_info,
		core_info, a5_dev_intf->hw_idx);
	if (rc < 0) {
		CAM_ERR(CAM_ICP, "a5 cpas registration failed");
		goto cpas_reg_failed;
	}
	a5_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
	mutex_init(&a5_dev->hw_mutex);
	spin_lock_init(&a5_dev->hw_lock);
	init_completion(&a5_dev->hw_complete);
	CAM_DBG(CAM_ICP, "A5%d probe successful",
		a5_dev_intf->hw_idx);
	return 0;
cpas_reg_failed:
init_soc_failure:
match_err:
	kfree(a5_dev->core_info);
core_info_alloc_failure:
	kfree(a5_dev);
a5_dev_alloc_failure:
	kfree(a5_dev_intf);
	return rc;
}
/* DT match table: binds "qcom,cam-a5" nodes and carries the register
 * offset table (cam_a5_hw_info) as match data.
 */
static const struct of_device_id cam_a5_dt_match[] = {
	{
		.compatible = "qcom,cam-a5",
		.data = &cam_a5_hw_info,
	},
	{}
};
MODULE_DEVICE_TABLE(of, cam_a5_dt_match);
/* Platform driver glue; bind/unbind via sysfs is suppressed */
static struct platform_driver cam_a5_driver = {
	.probe = cam_a5_probe,
	.driver = {
		.name = "cam-a5",
		.owner = THIS_MODULE,
		.of_match_table = cam_a5_dt_match,
		.suppress_bind_attrs = true,
	},
};
/* Module entry: register the A5 platform driver */
static int __init cam_a5_init_module(void)
{
	return platform_driver_register(&cam_a5_driver);
}
/* Module exit: unregister the A5 platform driver */
static void __exit cam_a5_exit_module(void)
{
	platform_driver_unregister(&cam_a5_driver);
}
module_init(cam_a5_init_module);
module_exit(cam_a5_exit_module);
MODULE_DESCRIPTION("CAM A5 driver");
MODULE_LICENSE("GPL v2");

View file

@ -0,0 +1,113 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-buf.h>
#include <media/cam_defs.h>
#include <media/cam_icp.h>
#include "a5_soc.h"
#include "cam_soc_util.h"
#include "cam_debug_util.h"
/**
 * cam_a5_get_dt_properties() - read common SOC plus A5-specific DT props
 * @soc_info: SOC info whose soc_private points at struct a5_soc_info
 *
 * Reads generic SOC properties, the "fw_name" string and the "ubwc-cfg"
 * u32 array (at most ICP_UBWC_MAX entries) into the A5 private soc info.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
	int rc = 0, i;
	const char *fw_name;
	struct a5_soc_info *camp_a5_soc_info;
	struct device_node *of_node = NULL;
	struct platform_device *pdev = NULL;
	int num_ubwc_cfg;

	pdev = soc_info->pdev;
	of_node = pdev->dev.of_node;

	rc = cam_soc_util_get_dt_properties(soc_info);
	if (rc < 0) {
		CAM_ERR(CAM_ICP, "get a5 dt prop is failed");
		return rc;
	}

	camp_a5_soc_info = soc_info->soc_private;
	fw_name = camp_a5_soc_info->fw_name;

	/* NOTE(review): the string read lands only in the local fw_name and
	 * is never written back to camp_a5_soc_info->fw_name — the DT value
	 * is effectively used only as an existence check. Confirm intent.
	 */
	rc = of_property_read_string(of_node, "fw_name", &fw_name);
	if (rc < 0) {
		CAM_ERR(CAM_ICP, "fw_name read failed");
		goto end;
	}

	num_ubwc_cfg = of_property_count_u32_elems(of_node, "ubwc-cfg");
	if ((num_ubwc_cfg < 0) || (num_ubwc_cfg > ICP_UBWC_MAX)) {
		CAM_ERR(CAM_ICP, "wrong ubwc_cfg: %d", num_ubwc_cfg);
		/* Fix: a too-large count previously returned the positive
		 * count as rc, which callers checking (rc < 0) treated as
		 * success. Map it to -EINVAL; propagate real OF errors.
		 */
		rc = (num_ubwc_cfg < 0) ? num_ubwc_cfg : -EINVAL;
		goto end;
	}

	for (i = 0; i < num_ubwc_cfg; i++) {
		rc = of_property_read_u32_index(of_node, "ubwc-cfg",
			i, &camp_a5_soc_info->ubwc_cfg[i]);
		if (rc < 0) {
			CAM_ERR(CAM_ICP, "unable to read ubwc cfg values");
			break;
		}
	}

end:
	return rc;
}
/*
 * Thin wrapper: request memory regions, clocks and the IRQ line for the
 * A5 through the common SOC utility, wiring the IRQ to @a5_irq_handler.
 */
static int cam_a5_request_platform_resource(
	struct cam_hw_soc_info *soc_info,
	irq_handler_t a5_irq_handler, void *irq_data)
{
	return cam_soc_util_request_platform_resource(soc_info,
		a5_irq_handler, irq_data);
}
/*
 * Initialize A5 SOC state: parse device-tree properties first, then
 * request platform resources (regions, clocks, IRQ). Returns 0 on
 * success or the first negative errno encountered.
 */
int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t a5_irq_handler, void *irq_data)
{
	int ret;

	ret = cam_a5_get_dt_properties(soc_info);
	if (ret < 0)
		return ret;

	return cam_a5_request_platform_resource(soc_info, a5_irq_handler,
		irq_data);
}
/*
 * Power up the A5: enable regulators/clocks at the SVS corner and
 * enable the IRQ (last argument true). Logs on failure.
 */
int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
	int ret;

	ret = cam_soc_util_enable_platform_resource(soc_info, true,
		CAM_SVS_VOTE, true);
	if (ret)
		CAM_ERR(CAM_ICP, "enable platform failed");

	return ret;
}
/*
 * Power down the A5: disable clocks (first true) and the IRQ
 * (second true). Logs on failure; returns the utility's result.
 */
int cam_a5_disable_soc_resources(struct cam_hw_soc_info *soc_info)
{
	int ret;

	ret = cam_soc_util_disable_platform_resource(soc_info, true, true);
	if (ret)
		CAM_ERR(CAM_ICP, "disable platform failed");

	return ret;
}

View file

@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */
#ifndef CAM_A5_SOC_H
#define CAM_A5_SOC_H
#include "cam_soc_util.h"
/* Max entries read from the "ubwc-cfg" DT property */
#define ICP_UBWC_MAX 2
/**
 * struct a5_soc_info - A5-specific SOC private data
 * @fw_name:  firmware image name (see a5_soc.c for how it is populated)
 * @ubwc_cfg: UBWC configuration words read from device tree
 */
struct a5_soc_info {
	char *fw_name;
	uint32_t ubwc_cfg[ICP_UBWC_MAX];
};
/* SOC lifecycle helpers implemented in a5_soc.c */
int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t a5_irq_handler, void *irq_data);
int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info);
int cam_a5_disable_soc_resources(struct cam_hw_soc_info *soc_info);
#endif

View file

@ -0,0 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
# Build the BPS (Bayer Processing Segment) hardware objects; the include
# paths pull in shared camera utils, request manager, core, ICP and CPAS
# headers from sibling directories.
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += bps_dev.o bps_core.o bps_soc.o

View file

@ -0,0 +1,398 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/iopoll.h>
#include "cam_io_util.h"
#include "cam_hw.h"
#include "cam_hw_intf.h"
#include "bps_core.h"
#include "bps_soc.h"
#include "cam_soc_util.h"
#include "cam_io_util.h"
#include "cam_bps_hw_intf.h"
#include "cam_icp_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
#include "cam_debug_util.h"
#include "hfi_reg.h"
/* Max retry iterations for the BPS reset-done polling loops */
#define HFI_MAX_POLL_TRY 5

/**
 * cam_bps_cpas_vote() - apply AHB and/or AXI bandwidth votes for BPS
 * @core_info: BPS core state holding the CPAS client handle
 * @cpas_vote: vote payload; only the *_valid parts are applied
 *
 * Fix: previously a failed AHB vote was silently overwritten by the
 * return of a subsequent successful AXI vote; the first failure is now
 * logged and propagated.
 *
 * Return: 0 on success, first negative errno otherwise.
 */
static int cam_bps_cpas_vote(struct cam_bps_device_core_info *core_info,
	struct cam_icp_cpas_vote *cpas_vote)
{
	int rc = 0;

	if (cpas_vote->ahb_vote_valid) {
		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
			&cpas_vote->ahb_vote);
		if (rc < 0)
			CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
	}

	if (cpas_vote->axi_vote_valid) {
		int axi_rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
			&cpas_vote->axi_vote);

		if (axi_rc < 0) {
			CAM_ERR(CAM_ICP, "cpas vote is failed: %d", axi_rc);
			if (!rc)
				rc = axi_rc;
		}
	}

	return rc;
}
/**
 * cam_bps_init_hw() - power up the BPS hardware
 * @device_priv:  struct cam_hw_info of the BPS device
 * @init_hw_args: unused
 * @arg_size:     unused
 *
 * Starts CPAS with default SVS AHB / default AXI votes, then enables the
 * SOC resources (regulators/clocks). If SOC enable fails, CPAS is stopped
 * again so cpas_start tracks the true state.
 *
 * Return: 0 on success, negative errno on failure.
 */
int cam_bps_init_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size)
{
	struct cam_hw_info *bps_dev = device_priv;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_bps_device_core_info *core_info = NULL;
	struct cam_icp_cpas_vote cpas_vote;
	int rc = 0;
	if (!device_priv) {
		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
		return -EINVAL;
	}
	soc_info = &bps_dev->soc_info;
	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
	if ((!soc_info) || (!core_info)) {
		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
			soc_info, core_info);
		return -EINVAL;
	}
	/* Default power-up votes: absolute SVS AHB level, default AXI bw */
	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
	cpas_vote.axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
	cpas_vote.axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
	rc = cam_cpas_start(core_info->cpas_handle,
		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
	if (rc) {
		CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
		return rc;
	}
	core_info->cpas_start = true;
	rc = cam_bps_enable_soc_resources(soc_info);
	if (rc) {
		CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc);
		/* Unwind the CPAS start so state stays consistent */
		if (cam_cpas_stop(core_info->cpas_handle))
			CAM_ERR(CAM_ICP, "cpas stop is failed");
		else
			core_info->cpas_start = false;
	} else {
		core_info->clk_enable = true;
	}
	return rc;
}
/**
 * cam_bps_deinit_hw() - power down the BPS hardware
 * @device_priv:  struct cam_hw_info of the BPS device
 * @init_hw_args: unused
 * @arg_size:     unused
 *
 * Disables SOC resources (clocks disabled only if clk_enable is set),
 * then stops CPAS if it was started. Errors during teardown are logged
 * but teardown continues.
 *
 * Return: result of the SOC disable step.
 */
int cam_bps_deinit_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size)
{
	struct cam_hw_info *bps_dev = device_priv;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_bps_device_core_info *core_info = NULL;
	int rc = 0;
	if (!device_priv) {
		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
		return -EINVAL;
	}
	soc_info = &bps_dev->soc_info;
	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
	if ((!soc_info) || (!core_info)) {
		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
			soc_info, core_info);
		return -EINVAL;
	}
	rc = cam_bps_disable_soc_resources(soc_info, core_info->clk_enable);
	if (rc)
		CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
	core_info->clk_enable = false;
	if (core_info->cpas_start) {
		if (cam_cpas_stop(core_info->cpas_handle))
			CAM_ERR(CAM_ICP, "cpas stop is failed");
		else
			core_info->cpas_start = false;
	}
	return rc;
}
/**
 * cam_bps_handle_pc() - put the BPS into power collapse
 * @bps_dev: BPS cam_hw_info
 *
 * If the collapse bit is not already set in the CPAS-top power-control
 * register, reads power status, asserts the collapse bit, and bails out
 * if the core still reports powered on. Finally switches the GDSC
 * regulators back to NORMAL mode and logs the resulting state.
 *
 * NOTE(review): pwr_ctrl/pwr_status are declared int but passed to
 * cam_cpas_reg_read — confirm the expected pointer type. Also
 * (pwr_status >> BPS_PWR_ON_MASK) uses a mask value (0x2) as a shift
 * count — confirm against the power-status register layout.
 *
 * Return: 0 on success, -EINVAL if the core failed to collapse.
 */
static int cam_bps_handle_pc(struct cam_hw_info *bps_dev)
{
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_bps_device_core_info *core_info = NULL;
	struct cam_bps_device_hw_info *hw_info = NULL;
	int pwr_ctrl;
	int pwr_status;
	soc_info = &bps_dev->soc_info;
	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
	hw_info = core_info->bps_hw_info;
	cam_cpas_reg_read(core_info->cpas_handle,
		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
		true, &pwr_ctrl);
	if (!(pwr_ctrl & BPS_COLLAPSE_MASK)) {
		cam_cpas_reg_read(core_info->cpas_handle,
			CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
			true, &pwr_status);
		/* Assert the collapse request */
		cam_cpas_reg_write(core_info->cpas_handle,
			CAM_CPAS_REG_CPASTOP,
			hw_info->pwr_ctrl, true, 0x1);
		if ((pwr_status >> BPS_PWR_ON_MASK)) {
			CAM_ERR(CAM_ICP, "BPS: pwr_status(%x):pwr_ctrl(%x)",
				pwr_status, pwr_ctrl);
			return -EINVAL;
		}
	}
	cam_bps_get_gdsc_control(soc_info);
	/* Re-read both registers purely for the debug trace below */
	cam_cpas_reg_read(core_info->cpas_handle,
		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true,
		&pwr_ctrl);
	cam_cpas_reg_read(core_info->cpas_handle,
		CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
		true, &pwr_status);
	CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
		pwr_ctrl, pwr_status);
	return 0;
}
/**
 * cam_bps_handle_resume() - bring the BPS out of power collapse
 * @bps_dev: BPS cam_hw_info
 *
 * Clears the collapse bit in the CPAS-top power-control register if set,
 * hands GDSC control back (regulators to FAST mode), and logs the final
 * control/status register values.
 *
 * Return: result of cam_bps_transfer_gdsc_control().
 */
static int cam_bps_handle_resume(struct cam_hw_info *bps_dev)
{
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_bps_device_core_info *core_info = NULL;
	struct cam_bps_device_hw_info *hw_info = NULL;
	int pwr_ctrl;
	int pwr_status;
	int rc = 0;
	soc_info = &bps_dev->soc_info;
	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
	hw_info = core_info->bps_hw_info;
	cam_cpas_reg_read(core_info->cpas_handle,
		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
	if (pwr_ctrl & BPS_COLLAPSE_MASK) {
		CAM_DBG(CAM_ICP, "BPS: pwr_ctrl set(%x)", pwr_ctrl);
		/* De-assert the collapse request */
		cam_cpas_reg_write(core_info->cpas_handle,
			CAM_CPAS_REG_CPASTOP,
			hw_info->pwr_ctrl, true, 0);
	}
	rc = cam_bps_transfer_gdsc_control(soc_info);
	/* Read back both registers for the debug trace */
	cam_cpas_reg_read(core_info->cpas_handle,
		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
	cam_cpas_reg_read(core_info->cpas_handle,
		CAM_CPAS_REG_CPASTOP, hw_info->pwr_status, true, &pwr_status);
	CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
		pwr_ctrl, pwr_status);
	return rc;
}
/**
 * cam_bps_cmd_reset() - hard-reset the BPS CDM and BPS top blocks
 * @soc_info:  BPS SOC info (register map)
 * @core_info: BPS core state (CPAS handle for the final debug reads)
 *
 * Issues the CDM reset, polls for the reset-done IRQ bit (bounded by
 * HFI_MAX_POLL_TRY), then does the same for the BPS top block. After
 * both resets, regulators are returned to NORMAL mode and the power
 * control/status registers are traced.
 *
 * Return: 0 on success, -EAGAIN if either reset did not complete.
 */
static int cam_bps_cmd_reset(struct cam_hw_soc_info *soc_info,
	struct cam_bps_device_core_info *core_info)
{
	uint32_t retry_cnt = 0;
	uint32_t status = 0;
	int pwr_ctrl, pwr_status, rc = 0;
	bool reset_bps_cdm_fail = false;
	bool reset_bps_top_fail = false;

	CAM_DBG(CAM_ICP, "CAM_ICP_BPS_CMD_RESET");

	/* Reset BPS CDM core*/
	cam_io_w_mb((uint32_t)0xF,
		soc_info->reg_map[0].mem_base + BPS_CDM_RST_CMD);
	while (retry_cnt < HFI_MAX_POLL_TRY) {
		readw_poll_timeout((soc_info->reg_map[0].mem_base +
			BPS_CDM_IRQ_STATUS),
			status, ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1),
			100, 10000);
		CAM_DBG(CAM_ICP, "bps_cdm_irq_status = %u", status);

		if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1)
			break;
		retry_cnt++;
	}
	status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		BPS_CDM_IRQ_STATUS);
	if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
		CAM_ERR(CAM_ICP, "BPS CDM rst failed status 0x%x", status);
		reset_bps_cdm_fail = true;
	}

	/* Reset BPS core*/
	/* Fix: restart the retry budget for the second poll loop. It was
	 * previously carried over from the CDM loop, so if the CDM polling
	 * exhausted its retries the BPS-top reset was never polled at all.
	 */
	retry_cnt = 0;
	status = 0;
	cam_io_w_mb((uint32_t)0x3,
		soc_info->reg_map[0].mem_base + BPS_TOP_RST_CMD);
	while (retry_cnt < HFI_MAX_POLL_TRY) {
		readw_poll_timeout((soc_info->reg_map[0].mem_base +
			BPS_TOP_IRQ_STATUS),
			status, ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1),
			100, 10000);
		CAM_DBG(CAM_ICP, "bps_top_irq_status = %u", status);

		if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1)
			break;
		retry_cnt++;
	}
	status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		BPS_TOP_IRQ_STATUS);
	if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
		CAM_ERR(CAM_ICP, "BPS top rst failed status 0x%x", status);
		reset_bps_top_fail = true;
	}

	cam_bps_get_gdsc_control(soc_info);
	cam_cpas_reg_read(core_info->cpas_handle,
		CAM_CPAS_REG_CPASTOP, core_info->bps_hw_info->pwr_ctrl,
		true, &pwr_ctrl);
	cam_cpas_reg_read(core_info->cpas_handle,
		CAM_CPAS_REG_CPASTOP, core_info->bps_hw_info->pwr_status,
		true, &pwr_status);
	CAM_DBG(CAM_ICP, "(After) pwr_ctrl = %x pwr_status = %x",
		pwr_ctrl, pwr_status);

	if (reset_bps_cdm_fail || reset_bps_top_fail)
		rc = -EAGAIN;

	return rc;
}
/**
 * cam_bps_process_cmd() - dispatch a BPS control command
 * @device_priv: struct cam_hw_info of the BPS device
 * @cmd_type:    one of the CAM_ICP_BPS_* command codes
 * @cmd_args:    command-specific payload
 * @arg_size:    size of @cmd_args (not validated here)
 *
 * Return: 0 on success, negative errno on failure.
 */
int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
	void *cmd_args, uint32_t arg_size)
{
	struct cam_hw_info *bps_dev = device_priv;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_bps_device_core_info *core_info = NULL;
	struct cam_bps_device_hw_info *hw_info = NULL;
	int rc = 0;

	if (!device_priv) {
		CAM_ERR(CAM_ICP, "Invalid arguments");
		return -EINVAL;
	}

	if (cmd_type >= CAM_ICP_BPS_CMD_MAX) {
		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
		return -EINVAL;
	}

	soc_info = &bps_dev->soc_info;
	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
	hw_info = core_info->bps_hw_info;

	switch (cmd_type) {
	case CAM_ICP_BPS_CMD_VOTE_CPAS: {
		struct cam_icp_cpas_vote *cpas_vote = cmd_args;

		if (!cmd_args) {
			CAM_ERR(CAM_ICP, "cmd args NULL");
			return -EINVAL;
		}

		cam_bps_cpas_vote(core_info, cpas_vote);
		break;
	}
	case CAM_ICP_BPS_CMD_CPAS_START: {
		struct cam_icp_cpas_vote *cpas_vote = cmd_args;

		if (!cmd_args) {
			CAM_ERR(CAM_ICP, "cmd args NULL");
			return -EINVAL;
		}

		if (!core_info->cpas_start) {
			rc = cam_cpas_start(core_info->cpas_handle,
				&cpas_vote->ahb_vote,
				&cpas_vote->axi_vote);
			/* Fix: only record the started state on success;
			 * previously a failed start still set cpas_start,
			 * so a later CPAS_STOP would stop a never-started
			 * client.
			 */
			if (!rc)
				core_info->cpas_start = true;
		}
		break;
	}
	case CAM_ICP_BPS_CMD_CPAS_STOP:
		if (core_info->cpas_start) {
			cam_cpas_stop(core_info->cpas_handle);
			core_info->cpas_start = false;
		}
		break;
	case CAM_ICP_BPS_CMD_POWER_COLLAPSE:
		rc = cam_bps_handle_pc(bps_dev);
		break;
	case CAM_ICP_BPS_CMD_POWER_RESUME:
		rc = cam_bps_handle_resume(bps_dev);
		break;
	case CAM_ICP_BPS_CMD_UPDATE_CLK: {
		struct cam_a5_clk_update_cmd *clk_upd_cmd =
			(struct cam_a5_clk_update_cmd *)cmd_args;
		uint32_t clk_rate = clk_upd_cmd->curr_clk_rate;

		CAM_DBG(CAM_ICP, "bps_src_clk rate = %d", (int)clk_rate);

		if (!core_info->clk_enable) {
			/* When power-collapse is enabled, collapse and
			 * un-gate before re-enabling clocks, then resume.
			 */
			if (clk_upd_cmd->ipe_bps_pc_enable) {
				cam_bps_handle_pc(bps_dev);
				cam_cpas_reg_write(core_info->cpas_handle,
					CAM_CPAS_REG_CPASTOP,
					hw_info->pwr_ctrl, true, 0x0);
			}
			rc = cam_bps_toggle_clk(soc_info, true);
			if (rc)
				CAM_ERR(CAM_ICP, "Enable failed");
			else
				core_info->clk_enable = true;
			if (clk_upd_cmd->ipe_bps_pc_enable) {
				rc = cam_bps_handle_resume(bps_dev);
				if (rc)
					CAM_ERR(CAM_ICP, "BPS resume failed");
			}
		}
		CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
		rc = cam_bps_update_clk_rate(soc_info, clk_rate);
		if (rc)
			CAM_ERR(CAM_ICP, "Failed to update clk");
	}
	break;
	case CAM_ICP_BPS_CMD_DISABLE_CLK:
		if (core_info->clk_enable == true)
			cam_bps_toggle_clk(soc_info, false);
		core_info->clk_enable = false;
		break;
	case CAM_ICP_BPS_CMD_RESET:
		rc = cam_bps_cmd_reset(soc_info, core_info);
		break;
	default:
		CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type);
		rc = -EINVAL;
		break;
	}

	return rc;
}
/* BPS IRQ handler stub: the BPS has no interrupt work to do here; the
 * handler exists only so the shared SOC resource-request path can wire
 * an IRQ line. Always reports the interrupt as handled.
 */
irqreturn_t cam_bps_irq(int irq_num, void *data)
{
	return IRQ_HANDLED;
}

View file

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */
#ifndef CAM_BPS_CORE_H
#define CAM_BPS_CORE_H
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-buf.h>
/* Power-control register bits used by the PC/resume handlers */
#define BPS_COLLAPSE_MASK 0x1
#define BPS_PWR_ON_MASK   0x2
/**
 * struct cam_bps_device_hw_info - static BPS hardware description
 * @hw_idx:     hardware instance index
 * @pwr_ctrl:   CPAS-top power control register offset
 * @pwr_status: CPAS-top power status register offset
 * @reserved:   unused
 */
struct cam_bps_device_hw_info {
	uint32_t hw_idx;
	uint32_t pwr_ctrl;
	uint32_t pwr_status;
	uint32_t reserved;
};
/**
 * struct cam_bps_device_core_info - per-device runtime state of the BPS
 * @bps_hw_info: static hardware description (register offsets)
 * @cpas_handle: CPAS client handle obtained at probe
 * @cpas_start:  true while CPAS is started for this device
 * @clk_enable:  true while the BPS clocks are enabled
 */
struct cam_bps_device_core_info {
	struct cam_bps_device_hw_info *bps_hw_info;
	uint32_t cpas_handle;
	bool cpas_start;
	bool clk_enable;
};
/* HW-interface ops registered on the cam_hw_intf at probe time */
int cam_bps_init_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size);
int cam_bps_deinit_hw(void *device_priv,
	void *init_hw_args, uint32_t arg_size);
int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
	void *cmd_args, uint32_t arg_size);
irqreturn_t cam_bps_irq(int irq_num, void *data);
#endif /* CAM_BPS_CORE_H */

View file

@ -0,0 +1,207 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/timer.h>
#include "bps_core.h"
#include "bps_soc.h"
#include "cam_hw.h"
#include "cam_hw_intf.h"
#include "cam_io_util.h"
#include "cam_icp_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
#include "cam_debug_util.h"
/* BPS power control/status register offsets within the CPAS top block;
 * referenced locally and via the cam_bps_dt_match data.
 *
 * Fix: dropped the EXPORT_SYMBOL() that was applied to this symbol —
 * exporting an identifier with internal (static) linkage is contradictory
 * (modpost rejects it on newer kernels) and no external module can name
 * it anyway. The a5 counterpart exports a non-static symbol instead.
 */
static struct cam_bps_device_hw_info cam_bps_hw_info = {
	.hw_idx = 0,
	.pwr_ctrl = 0x5c,
	.pwr_status = 0x58,
	.reserved = 0,
};

/* Device name buffer ("bpsN"), referenced by soc_info->dev_name */
static char bps_dev_name[8];
/*
 * CPAS IRQ callback for the BPS client: logs (rate-limited) UBWC
 * encode/decode errors raised by CAMNOC and reports whether the
 * interrupt type was recognized and handled.
 */
static bool cam_bps_cpas_cb(uint32_t client_handle, void *userdata,
	struct cam_cpas_irq_data *irq_data)
{
	bool handled = false;

	if (!irq_data)
		return false;

	if (irq_data->irq_type == CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR) {
		CAM_ERR_RATE_LIMIT(CAM_ICP,
			"IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
			irq_data->irq_type,
			irq_data->u.dec_err.decerr_status.value,
			irq_data->u.dec_err.decerr_status.thr_err,
			irq_data->u.dec_err.decerr_status.fcl_err,
			irq_data->u.dec_err.decerr_status.len_md_err,
			irq_data->u.dec_err.decerr_status.format_err);
		handled = true;
	} else if (irq_data->irq_type ==
		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR) {
		CAM_ERR_RATE_LIMIT(CAM_ICP,
			"IPE/BPS UBWC Encode error type=%d status=%x",
			irq_data->irq_type,
			irq_data->u.enc_err.encerr_status.value);
		handled = true;
	}

	return handled;
}
int cam_bps_register_cpas(struct cam_hw_soc_info *soc_info,
struct cam_bps_device_core_info *core_info,
uint32_t hw_idx)
{
struct cam_cpas_register_params cpas_register_params;
int rc;
cpas_register_params.dev = &soc_info->pdev->dev;
memcpy(cpas_register_params.identifier, "bps", sizeof("bps"));
cpas_register_params.cam_cpas_client_cb = cam_bps_cpas_cb;
cpas_register_params.cell_index = hw_idx;
cpas_register_params.userdata = NULL;
rc = cam_cpas_register_client(&cpas_register_params);
if (rc < 0) {
CAM_ERR(CAM_ICP, "failed: %d", rc);
return rc;
}
core_info->cpas_handle = cpas_register_params.client_handle;
return rc;
}
/**
 * cam_bps_probe() - platform probe for the BPS device
 * @pdev: matched platform device
 *
 * Allocates the cam_hw_intf / cam_hw_info / core_info triple, wires up
 * the hw ops, initializes SOC resources and registers with CPAS.
 *
 * Fix: replaced the four duplicated kfree ladders with the idiomatic
 * goto-cleanup pattern (same frees, same order) and added the missing
 * error log on the CPAS registration failure path, matching the a5
 * probe.
 *
 * Return: 0 on success, negative errno on failure.
 */
int cam_bps_probe(struct platform_device *pdev)
{
	struct cam_hw_info *bps_dev = NULL;
	struct cam_hw_intf *bps_dev_intf = NULL;
	const struct of_device_id *match_dev = NULL;
	struct cam_bps_device_core_info *core_info = NULL;
	struct cam_bps_device_hw_info *hw_info = NULL;
	int rc = 0;

	bps_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
	if (!bps_dev_intf)
		return -ENOMEM;

	/* hw_idx stays 0 if "cell-index" is absent (return value ignored) */
	of_property_read_u32(pdev->dev.of_node,
		"cell-index", &bps_dev_intf->hw_idx);

	bps_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
	if (!bps_dev) {
		rc = -ENOMEM;
		goto free_dev_intf;
	}

	memset(bps_dev_name, 0, sizeof(bps_dev_name));
	snprintf(bps_dev_name, sizeof(bps_dev_name),
		"bps%1u", bps_dev_intf->hw_idx);

	bps_dev->soc_info.pdev = pdev;
	bps_dev->soc_info.dev = &pdev->dev;
	bps_dev->soc_info.dev_name = bps_dev_name;
	bps_dev_intf->hw_priv = bps_dev;
	bps_dev_intf->hw_ops.init = cam_bps_init_hw;
	bps_dev_intf->hw_ops.deinit = cam_bps_deinit_hw;
	bps_dev_intf->hw_ops.process_cmd = cam_bps_process_cmd;
	bps_dev_intf->hw_type = CAM_ICP_DEV_BPS;
	platform_set_drvdata(pdev, bps_dev_intf);

	bps_dev->core_info = kzalloc(sizeof(struct cam_bps_device_core_info),
		GFP_KERNEL);
	if (!bps_dev->core_info) {
		rc = -ENOMEM;
		goto free_dev;
	}
	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;

	match_dev = of_match_device(pdev->dev.driver->of_match_table,
		&pdev->dev);
	if (!match_dev) {
		CAM_ERR(CAM_ICP, "No bps hardware info");
		rc = -EINVAL;
		goto free_core_info;
	}
	hw_info = &cam_bps_hw_info;
	core_info->bps_hw_info = hw_info;

	rc = cam_bps_init_soc_resources(&bps_dev->soc_info, cam_bps_irq,
		bps_dev);
	if (rc < 0) {
		CAM_ERR(CAM_ICP, "failed to init_soc");
		goto free_core_info;
	}

	CAM_DBG(CAM_ICP, "soc info : %pK",
		(void *)&bps_dev->soc_info);
	rc = cam_bps_register_cpas(&bps_dev->soc_info,
		core_info, bps_dev_intf->hw_idx);
	if (rc < 0) {
		CAM_ERR(CAM_ICP, "bps cpas registration failed");
		goto free_core_info;
	}

	bps_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
	mutex_init(&bps_dev->hw_mutex);
	spin_lock_init(&bps_dev->hw_lock);
	init_completion(&bps_dev->hw_complete);

	CAM_DBG(CAM_ICP, "BPS%d probe successful",
		bps_dev_intf->hw_idx);
	return rc;

free_core_info:
	kfree(bps_dev->core_info);
free_dev:
	kfree(bps_dev);
free_dev_intf:
	kfree(bps_dev_intf);
	return rc;
}
/* DT match table: binds "qcom,cam-bps" nodes; match data carries the BPS
 * register offset table.
 */
static const struct of_device_id cam_bps_dt_match[] = {
	{
		.compatible = "qcom,cam-bps",
		.data = &cam_bps_hw_info,
	},
	{}
};
MODULE_DEVICE_TABLE(of, cam_bps_dt_match);
/* Platform driver glue; bind/unbind via sysfs is suppressed */
static struct platform_driver cam_bps_driver = {
	.probe = cam_bps_probe,
	.driver = {
		.name = "cam-bps",
		.owner = THIS_MODULE,
		.of_match_table = cam_bps_dt_match,
		.suppress_bind_attrs = true,
	},
};
/* Module entry: register the BPS platform driver */
static int __init cam_bps_init_module(void)
{
	return platform_driver_register(&cam_bps_driver);
}
/* Module exit: unregister the BPS platform driver */
static void __exit cam_bps_exit_module(void)
{
	platform_driver_unregister(&cam_bps_driver);
}
module_init(cam_bps_init_module);
module_exit(cam_bps_exit_module);
MODULE_DESCRIPTION("CAM BPS driver");
MODULE_LICENSE("GPL v2");

View file

@ -0,0 +1,163 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-buf.h>
#include <media/cam_defs.h>
#include <media/cam_icp.h>
#include "bps_soc.h"
#include "cam_soc_util.h"
#include "cam_debug_util.h"
/*
 * Read the generic camera SOC device-tree properties for the BPS.
 * The BPS has no block-specific DT properties of its own.
 */
static int cam_bps_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
	int ret = cam_soc_util_get_dt_properties(soc_info);

	if (ret < 0)
		CAM_ERR(CAM_ICP, "get bps dt prop is failed");

	return ret;
}
/*
 * Thin wrapper: request memory regions, clocks and the IRQ line for the
 * BPS through the common SOC utility, wiring the IRQ to @bps_irq_handler.
 */
static int cam_bps_request_platform_resource(
	struct cam_hw_soc_info *soc_info,
	irq_handler_t bps_irq_handler, void *irq_data)
{
	return cam_soc_util_request_platform_resource(soc_info,
		bps_irq_handler, irq_data);
}
/*
 * Initialize BPS SOC state: parse device-tree properties first, then
 * request platform resources (regions, clocks, IRQ). Returns 0 on
 * success or the first negative errno encountered.
 */
int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t bps_irq_handler, void *irq_data)
{
	int ret;

	ret = cam_bps_get_dt_properties(soc_info);
	if (ret < 0)
		return ret;

	return cam_bps_request_platform_resource(soc_info, bps_irq_handler,
		irq_data);
}
/*
 * Power up the BPS: enable regulators/clocks at the SVS corner; the IRQ
 * stays disabled (last argument false) — the BPS IRQ is a stub.
 */
int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
	int ret;

	ret = cam_soc_util_enable_platform_resource(soc_info, true,
		CAM_SVS_VOTE, false);
	if (ret)
		CAM_ERR(CAM_ICP, "enable platform failed");

	return ret;
}
/*
 * Power down the BPS. @disable_clk selects whether clocks are gated too
 * (callers skip it when the clocks were never enabled); the IRQ flag is
 * always false since the BPS IRQ is never enabled here.
 */
int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
	bool disable_clk)
{
	int ret;

	ret = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
		false);
	if (ret)
		CAM_ERR(CAM_ICP, "disable platform failed");

	return ret;
}
/**
 * cam_bps_transfer_gdsc_control() - hand GDSC control to hardware
 * @soc_info: BPS SOC info holding the regulator array
 *
 * Switches every regulator to FAST mode. On failure, regulators already
 * switched are rolled back to NORMAL mode.
 *
 * Fix: the forward loop now skips NULL regulator slots, matching the
 * NULL check the rollback loop already performed — previously a sparse
 * rgltr[] would pass NULL into regulator_set_mode().
 *
 * Return: 0 on success, the failing regulator_set_mode() errno otherwise.
 */
int cam_bps_transfer_gdsc_control(struct cam_hw_soc_info *soc_info)
{
	int i;
	int rc;

	for (i = 0; i < soc_info->num_rgltr; i++) {
		if (!soc_info->rgltr[i])
			continue;
		rc = regulator_set_mode(soc_info->rgltr[i],
			REGULATOR_MODE_FAST);
		if (rc) {
			CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
				soc_info->rgltr_name[i]);
			goto rgltr_set_mode_failed;
		}
	}
	return 0;

rgltr_set_mode_failed:
	/* Roll back the regulators switched so far */
	for (i = i - 1; i >= 0; i--)
		if (soc_info->rgltr[i])
			regulator_set_mode(soc_info->rgltr[i],
				REGULATOR_MODE_NORMAL);
	return rc;
}
/**
 * cam_bps_get_gdsc_control() - take GDSC control back to software
 * @soc_info: BPS SOC info holding the regulator array
 *
 * Switches every regulator to NORMAL mode. On failure, regulators
 * already switched are rolled back to FAST mode.
 *
 * Fix: the forward loop now skips NULL regulator slots, matching the
 * NULL check the rollback loop already performed — previously a sparse
 * rgltr[] would pass NULL into regulator_set_mode().
 *
 * Return: 0 on success, the failing regulator_set_mode() errno otherwise.
 */
int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info)
{
	int i;
	int rc;

	for (i = 0; i < soc_info->num_rgltr; i++) {
		if (!soc_info->rgltr[i])
			continue;
		rc = regulator_set_mode(soc_info->rgltr[i],
			REGULATOR_MODE_NORMAL);
		if (rc) {
			CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
				soc_info->rgltr_name[i]);
			goto rgltr_set_mode_failed;
		}
	}
	return 0;

rgltr_set_mode_failed:
	/* Roll back the regulators switched so far */
	for (i = i - 1; i >= 0; i--)
		if (soc_info->rgltr[i])
			regulator_set_mode(soc_info->rgltr[i],
				REGULATOR_MODE_FAST);
	return rc;
}
/*
 * cam_bps_update_clk_rate - program the BPS source clock, clamping the
 * request to the highest (TURBO) supported rate when that level is
 * valid and non-zero.
 */
int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
	uint32_t clk_rate)
{
	int32_t src_clk_idx;
	int32_t max_rate;

	if (!soc_info)
		return -EINVAL;

	src_clk_idx = soc_info->src_clk_idx;
	max_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];

	/* Clamp to the TURBO rate if the request exceeds it. */
	if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
		(max_rate != 0) && (clk_rate > max_rate)) {
		CAM_DBG(CAM_ICP, "clk_rate %d greater than max, reset to %d",
			clk_rate, max_rate);
		clk_rate = max_rate;
	}

	return cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
}
/*
 * cam_bps_toggle_clk - enable the default BPS clocks at SVS level, or
 * disable them. Disable has no failure path and always returns 0.
 */
int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
{
	if (clk_enable)
		return cam_soc_util_clk_enable_default(soc_info,
			CAM_SVS_VOTE);

	cam_soc_util_clk_disable_default(soc_info);
	return 0;
}

View file

@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */
#ifndef _CAM_BPS_SOC_H_
#define _CAM_BPS_SOC_H_

/* For irq_handler_t used in the prototypes below; previously this
 * header relied on an indirect include and was not self-contained.
 */
#include <linux/interrupt.h>

#include "cam_soc_util.h"

/* Parse DT properties and request BPS platform resources. */
int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t bps_irq_handler, void *irq_data);

/* Power up BPS resources at the SVS clock level. */
int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info);

/* Power down BPS resources; @disable_clk controls clock disable. */
int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
	bool disable_clk);

/* Take GDSC control back from hardware (regulators to NORMAL mode). */
int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info);

/* Hand GDSC control to hardware (regulators to FAST mode). */
int cam_bps_transfer_gdsc_control(struct cam_hw_soc_info *soc_info);

/* Set the BPS source clock, clamped to the max (TURBO) rate. */
int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
	uint32_t clk_rate);

/* Enable (SVS level) or disable the default BPS clocks. */
int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
#endif /* _CAM_BPS_SOC_H_*/

View file

@ -0,0 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/hw_utils/include
ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/isp_hw_mgr/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_hw_mgr.o

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,364 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef CAM_ICP_HW_MGR_H
#define CAM_ICP_HW_MGR_H
#include <linux/types.h>
#include <linux/completion.h>
#include <media/cam_icp.h>
#include "cam_icp_hw_intf.h"
#include "cam_hw_mgr_intf.h"
#include "cam_hw_intf.h"
#include "cam_a5_hw_intf.h"
#include "hfi_session_defs.h"
#include "cam_req_mgr_workq.h"
#include "cam_mem_mgr.h"
#include "cam_smmu_api.h"
#include "cam_soc_util.h"
#include "cam_req_mgr_timer.h"
#define CAM_ICP_ROLE_PARENT 1
#define CAM_ICP_ROLE_CHILD 2
#define CAM_FRAME_CMD_MAX 20
#define CAM_MAX_OUT_RES 6
#define CAM_MAX_IN_RES 8
#define ICP_WORKQ_NUM_TASK 100
#define ICP_WORKQ_TASK_CMD_TYPE 1
#define ICP_WORKQ_TASK_MSG_TYPE 2
#define ICP_PACKET_SIZE 0
#define ICP_PACKET_TYPE 1
#define ICP_PACKET_OPCODE 2
#define ICP_MAX_OUTPUT_SUPPORTED 6
#define ICP_FRAME_PROCESS_SUCCESS 0
#define ICP_FRAME_PROCESS_FAILURE 1
#define ICP_MSG_BUF_SIZE 256
#define ICP_DBG_BUF_SIZE 102400
#define ICP_CLK_HW_IPE 0x0
#define ICP_CLK_HW_BPS 0x1
#define ICP_CLK_HW_MAX 0x2
#define ICP_OVER_CLK_THRESHOLD 15
#define CPAS_IPE0_BIT 0x1000
#define CPAS_IPE1_BIT 0x2000
#define CPAS_BPS_BIT 0x400
#define ICP_PWR_CLP_BPS 0x00000001
#define ICP_PWR_CLP_IPE0 0x00010000
#define ICP_PWR_CLP_IPE1 0x00020000
#define CAM_ICP_CTX_STATE_FREE 0x0
#define CAM_ICP_CTX_STATE_IN_USE 0x1
#define CAM_ICP_CTX_STATE_ACQUIRED 0x2
#define CAM_ICP_CTX_STATE_RELEASE 0x3
#define CAM_ICP_CTX_MAX_CMD_BUFFERS 0x2
/**
 * struct icp_hfi_mem_info
 * @qtbl: Memory info of queue table
 * @cmd_q: Memory info of command queue
 * @msg_q: Memory info of message queue
 * @dbg_q: Memory info of debug queue
 * @sec_heap: Memory info of secondary heap
 * @fw_buf: Memory info of firmware
 * @qdss_buf: Memory info of qdss
 * @sfr_buf: Memory info for sfr buffer
 * @shmem: SMMU shared-memory region info
 */
struct icp_hfi_mem_info {
struct cam_mem_mgr_memory_desc qtbl;
struct cam_mem_mgr_memory_desc cmd_q;
struct cam_mem_mgr_memory_desc msg_q;
struct cam_mem_mgr_memory_desc dbg_q;
struct cam_mem_mgr_memory_desc sec_heap;
struct cam_mem_mgr_memory_desc fw_buf;
struct cam_mem_mgr_memory_desc qdss_buf;
struct cam_mem_mgr_memory_desc sfr_buf;
struct cam_smmu_region_info shmem;
};
/**
* struct hfi_cmd_work_data
* @type: Task type
* @data: Pointer to command data
* @request_id: Request id
*/
struct hfi_cmd_work_data {
uint32_t type;
void *data;
int32_t request_id;
};
/**
* struct hfi_msg_work_data
* @type: Task type
* @data: Pointer to message data
* @irq_status: IRQ status
*/
struct hfi_msg_work_data {
uint32_t type;
void *data;
uint32_t irq_status;
};
/**
* struct clk_work_data
* @type: Task type
* @data: Pointer to clock info
*/
struct clk_work_data {
uint32_t type;
void *data;
};
/*
* struct icp_frame_info
* @request_id: request id
* @io_config: the address of io config
* @hfi_cfg_io_cmd: command struct to be sent to hfi
*/
struct icp_frame_info {
uint64_t request_id;
uint64_t io_config;
struct hfi_cmd_ipebps_async hfi_cfg_io_cmd;
};
/**
 * struct hfi_frame_process_info
 * @hfi_frame_cmd: Frame process command info
 * @bitmap: Bitmap for hfi_frame_cmd
 * @bits: Used in hfi_frame_cmd bitmap
 * @lock: Lock for hfi_frame_cmd
 * @request_id: Request id list
 * @num_out_resources: Number of out syncs
 * @out_resource: Out sync info
 * @in_resource: In resource per frame cmd (assumed input sync - verify)
 * @in_free_resource: In resource pending free per frame cmd (assumed - verify)
 * @fw_process_flag: Frame process flag
 * @clk_info: Clock information for a request
 * @frame_info: information needed to process request
 */
struct hfi_frame_process_info {
struct hfi_cmd_ipebps_async hfi_frame_cmd[CAM_FRAME_CMD_MAX];
void *bitmap;
size_t bits;
struct mutex lock;
uint64_t request_id[CAM_FRAME_CMD_MAX];
uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
uint32_t in_resource[CAM_FRAME_CMD_MAX];
uint32_t in_free_resource[CAM_FRAME_CMD_MAX];
uint32_t fw_process_flag[CAM_FRAME_CMD_MAX];
struct cam_icp_clk_bw_request clk_info[CAM_FRAME_CMD_MAX];
struct icp_frame_info frame_info[CAM_FRAME_CMD_MAX];
};
/**
 * struct cam_ctx_clk_info
 * @curr_fc: Context latest request frame cycles
 * @rt_flag: Flag to indicate real time request
 * @base_clk: Base clock to process the request
 * @reserved: Reserved field
 * @uncompressed_bw: Current bandwidth voting
 * @compressed_bw: Current compressed bandwidth voting
 * @clk_rate: Supported clock rates for the context
 */
struct cam_ctx_clk_info {
uint32_t curr_fc;
uint32_t rt_flag;
uint32_t base_clk;
uint32_t reserved;
uint64_t uncompressed_bw;
uint64_t compressed_bw;
int32_t clk_rate[CAM_MAX_VOTE];
};
/**
* struct cam_icp_hw_ctx_data
* @context_priv: Context private data
* @ctx_mutex: Mutex for context
* @fw_handle: Firmware handle
* @scratch_mem_size: Scratch memory size
* @acquire_dev_cmd: Acquire command
* @icp_dev_acquire_info: Acquire device info
* @ctxt_event_cb: Context callback function
* @state: context state
* @role: Role of a context in case of chaining
* @chain_ctx: Peer context
* @hfi_frame_process: Frame process command
* @wait_complete: Completion info
* @temp_payload: Payload for destroy handle data
* @ctx_id: Context Id
* @clk_info: Current clock info of a context
* @watch_dog: watchdog timer handle
* @watch_dog_reset_counter: Counter for watch dog reset
* @icp_dev_io_info: io config resource
*/
struct cam_icp_hw_ctx_data {
void *context_priv;
struct mutex ctx_mutex;
uint32_t fw_handle;
uint32_t scratch_mem_size;
struct cam_acquire_dev_cmd acquire_dev_cmd;
struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
cam_hw_event_cb_func ctxt_event_cb;
uint32_t state;
uint32_t role;
struct cam_icp_hw_ctx_data *chain_ctx;
struct hfi_frame_process_info hfi_frame_process;
struct completion wait_complete;
struct ipe_bps_destroy temp_payload;
uint32_t ctx_id;
struct cam_ctx_clk_info clk_info;
struct cam_req_mgr_timer *watch_dog;
uint32_t watch_dog_reset_counter;
struct cam_icp_acquire_dev_info icp_dev_io_info;
};
/**
* struct icp_cmd_generic_blob
* @ctx: Current context info
* @frame_info_idx: Index used for frame process info
* @io_buf_addr: pointer to io buffer address
*/
struct icp_cmd_generic_blob {
struct cam_icp_hw_ctx_data *ctx;
uint32_t frame_info_idx;
uint64_t *io_buf_addr;
};
/**
 * struct cam_icp_clk_info
 * @base_clk: Base clock to process request
 * @curr_clk: Current clock of hardware
 * @threshold: Threshold for overclk count
 * @over_clked: Over clock count
 * @uncompressed_bw: Current bandwidth voting
 * @compressed_bw: Current compressed bandwidth voting
 * @hw_type: IPE/BPS device type
 * @watch_dog: watchdog timer handle
 * @watch_dog_reset_counter: Counter for watch dog reset
 */
struct cam_icp_clk_info {
uint32_t base_clk;
uint32_t curr_clk;
uint32_t threshold;
uint32_t over_clked;
uint64_t uncompressed_bw;
uint64_t compressed_bw;
uint32_t hw_type;
struct cam_req_mgr_timer *watch_dog;
uint32_t watch_dog_reset_counter;
};
/**
* struct cam_icp_hw_mgr
* @hw_mgr_mutex: Mutex for ICP hardware manager
* @hw_mgr_lock: Spinlock for ICP hardware manager
* @devices: Devices of ICP hardware manager
* @ctx_data: Context data
* @icp_caps: ICP capabilities
* @fw_download: Firmware download state
* @iommu_hdl: Non secure IOMMU handle
* @iommu_sec_hdl: Secure IOMMU handle
* @hfi_mem: Memory for hfi
* @cmd_work: Work queue for hfi commands
* @msg_work: Work queue for hfi messages
* @timer_work: Work queue for timer watchdog
* @msg_buf: Buffer for message data from firmware
* @dbg_buf: Buffer for debug data from firmware
* @a5_complete: Completion info
* @cmd_work_data: Pointer to command work queue task
* @msg_work_data: Pointer to message work queue task
* @timer_work_data: Pointer to timer work queue task
* @ctxt_cnt: Active context count
* @ipe_ctxt_cnt: IPE Active context count
* @bps_ctxt_cnt: BPS Active context count
* @dentry: Debugfs entry
* @a5_debug: A5 debug flag
* @icp_pc_flag: Flag to enable/disable power collapse
* @ipe_bps_pc_flag: Flag to enable/disable
* power collapse for ipe & bps
* @icp_debug_clk: Set clock based on debug value
* @icp_default_clk: Set this clok if user doesn't supply
* @clk_info: Clock info of hardware
* @secure_mode: Flag to enable/disable secure camera
* @a5_jtag_debug: entry to enable A5 JTAG debugging
* @a5_debug_type : entry to enable FW debug message/qdss
* @a5_dbg_lvl : debug level set to FW.
* @a5_fw_dump_lvl : level set for dumping the FW data
* @ipe0_enable: Flag for IPE0
* @ipe1_enable: Flag for IPE1
* @bps_enable: Flag for BPS
* @core_info: 32 bit value , tells IPE0/1 and BPS
* @a5_dev_intf : Device interface for A5
* @ipe0_dev_intf: Device interface for IPE0
* @ipe1_dev_intf: Device interface for IPE1
* @bps_dev_intf: Device interface for BPS
* @ipe_clk_state: IPE clock state flag
* @bps_clk_state: BPS clock state flag
* @recovery: Flag to validate if in previous session FW
* reported a fatal error or wdt. If set FW is
* re-downloaded for new camera session.
*/
struct cam_icp_hw_mgr {
struct mutex hw_mgr_mutex;
spinlock_t hw_mgr_lock;
struct cam_hw_intf **devices[CAM_ICP_DEV_MAX];
struct cam_icp_hw_ctx_data ctx_data[CAM_ICP_CTX_MAX];
struct cam_icp_query_cap_cmd icp_caps;
bool fw_download;
int32_t iommu_hdl;
int32_t iommu_sec_hdl;
struct icp_hfi_mem_info hfi_mem;
struct cam_req_mgr_core_workq *cmd_work;
struct cam_req_mgr_core_workq *msg_work;
struct cam_req_mgr_core_workq *timer_work;
uint32_t msg_buf[ICP_MSG_BUF_SIZE];
uint32_t dbg_buf[ICP_DBG_BUF_SIZE];
struct completion a5_complete;
struct hfi_cmd_work_data *cmd_work_data;
struct hfi_msg_work_data *msg_work_data;
struct hfi_msg_work_data *timer_work_data;
uint32_t ctxt_cnt;
uint32_t ipe_ctxt_cnt;
uint32_t bps_ctxt_cnt;
struct dentry *dentry;
bool a5_debug;
bool icp_pc_flag;
bool ipe_bps_pc_flag;
uint64_t icp_debug_clk;
uint64_t icp_default_clk;
struct cam_icp_clk_info clk_info[ICP_CLK_HW_MAX];
bool secure_mode;
bool a5_jtag_debug;
u64 a5_debug_type;
u64 a5_dbg_lvl;
u64 a5_fw_dump_lvl;
bool ipe0_enable;
bool ipe1_enable;
bool bps_enable;
uint32_t core_info;
struct cam_hw_intf *a5_dev_intf;
struct cam_hw_intf *ipe0_dev_intf;
struct cam_hw_intf *ipe1_dev_intf;
struct cam_hw_intf *bps_dev_intf;
bool ipe_clk_state;
bool bps_clk_state;
atomic_t recovery;
};
static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args);
static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr);
static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr);
#endif /* CAM_ICP_HW_MGR_H */

View file

@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef CAM_A5_HW_INTF_H
#define CAM_A5_HW_INTF_H
#include <linux/timer.h>
#include <uapi/media/cam_defs.h>
#include <media/cam_icp.h>
#include "cam_hw_mgr_intf.h"
#include "cam_icp_hw_intf.h"
enum cam_icp_a5_cmd_type {
CAM_ICP_A5_CMD_FW_DOWNLOAD,
CAM_ICP_A5_CMD_POWER_COLLAPSE,
CAM_ICP_A5_CMD_POWER_RESUME,
CAM_ICP_A5_CMD_SET_FW_BUF,
CAM_ICP_A5_CMD_ACQUIRE,
CAM_ICP_A5_SET_IRQ_CB,
CAM_ICP_A5_TEST_IRQ,
CAM_ICP_A5_SEND_INIT,
CAM_ICP_A5_CMD_VOTE_CPAS,
CAM_ICP_A5_CMD_CPAS_START,
CAM_ICP_A5_CMD_CPAS_STOP,
CAM_ICP_A5_CMD_UBWC_CFG,
CAM_ICP_A5_CMD_PC_PREP,
CAM_ICP_A5_CMD_MAX,
};
struct cam_icp_a5_set_fw_buf_info {
uint32_t iova;
uint64_t kva;
uint64_t len;
};
/**
* struct cam_icp_a5_query_cap - ICP query device capability payload
* @fw_version: firmware version info
* @api_version: api version info
* @num_ipe: number of ipes
* @num_bps: number of bps
* @num_dev: number of device capabilities in dev_caps
* @reserved: reserved
* @dev_ver: returned device capability array
* @CAM_QUERY_CAP IOCTL
*/
struct cam_icp_a5_query_cap {
struct cam_icp_ver fw_version;
struct cam_icp_ver api_version;
uint32_t num_ipe;
uint32_t num_bps;
uint32_t num_dev;
uint32_t reserved;
struct cam_icp_dev_ver dev_ver[CAM_ICP_DEV_TYPE_MAX];
};
struct cam_icp_a5_acquire_dev {
uint32_t ctx_id;
struct cam_icp_acquire_dev_info icp_acquire_info;
struct cam_icp_res_info icp_out_acquire_info[2];
uint32_t fw_handle;
};
struct cam_icp_a5_set_irq_cb {
int32_t (*icp_hw_mgr_cb)(uint32_t irq_status, void *data);
void *data;
};
struct cam_icp_a5_test_irq {
uint32_t test_irq;
};
#endif /* CAM_A5_HW_INTF_H */

View file

@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef CAM_BPS_HW_INTF_H
#define CAM_BPS_HW_INTF_H
#include <uapi/media/cam_defs.h>
#include <media/cam_icp.h>
#include "cam_hw_mgr_intf.h"
#include "cam_icp_hw_intf.h"
/* BPS register */
#define BPS_TOP_RST_CMD 0x1008
#define BPS_CDM_RST_CMD 0x10
#define BPS_CDM_IRQ_STATUS 0x44
#define BPS_TOP_IRQ_STATUS 0x100C
/* BPS CDM/TOP status register */
#define BPS_RST_DONE_IRQ_STATUS_BIT 0x1
enum cam_icp_bps_cmd_type {
CAM_ICP_BPS_CMD_FW_DOWNLOAD,
CAM_ICP_BPS_CMD_POWER_COLLAPSE,
CAM_ICP_BPS_CMD_POWER_RESUME,
CAM_ICP_BPS_CMD_SET_FW_BUF,
CAM_ICP_BPS_CMD_VOTE_CPAS,
CAM_ICP_BPS_CMD_CPAS_START,
CAM_ICP_BPS_CMD_CPAS_STOP,
CAM_ICP_BPS_CMD_UPDATE_CLK,
CAM_ICP_BPS_CMD_DISABLE_CLK,
CAM_ICP_BPS_CMD_RESET,
CAM_ICP_BPS_CMD_MAX,
};
#endif /* CAM_BPS_HW_INTF_H */

View file

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef CAM_ICP_HW_INTF_H
#define CAM_ICP_HW_INTF_H
#define CAM_ICP_CMD_BUF_MAX_SIZE 128
#define CAM_ICP_MSG_BUF_MAX_SIZE CAM_ICP_CMD_BUF_MAX_SIZE
enum cam_a5_hw_type {
CAM_ICP_DEV_A5,
CAM_ICP_DEV_IPE,
CAM_ICP_DEV_BPS,
CAM_ICP_DEV_MAX,
};
/**
 * struct cam_a5_clk_update_cmd - Payload for hw manager command
 *
 * @curr_clk_rate: clk rate to HW
 * @ipe_bps_pc_enable: power collapse enable flag
 */
struct cam_a5_clk_update_cmd {
uint32_t curr_clk_rate;
bool ipe_bps_pc_enable;
};
#endif

View file

@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef CAM_IPE_HW_INTF_H
#define CAM_IPE_HW_INTF_H
#include <uapi/media/cam_defs.h>
#include <media/cam_icp.h>
#include "cam_hw_mgr_intf.h"
#include "cam_icp_hw_intf.h"
/* IPE registers */
#define IPE_TOP_RST_CMD 0x1008
#define IPE_CDM_RST_CMD 0x10
#define IPE_CDM_IRQ_STATUS 0x44
#define IPE_TOP_IRQ_STATUS 0x100C
/* IPE CDM/TOP status register */
#define IPE_RST_DONE_IRQ_STATUS_BIT 0x1
enum cam_icp_ipe_cmd_type {
CAM_ICP_IPE_CMD_FW_DOWNLOAD,
CAM_ICP_IPE_CMD_POWER_COLLAPSE,
CAM_ICP_IPE_CMD_POWER_RESUME,
CAM_ICP_IPE_CMD_SET_FW_BUF,
CAM_ICP_IPE_CMD_VOTE_CPAS,
CAM_ICP_IPE_CMD_CPAS_START,
CAM_ICP_IPE_CMD_CPAS_STOP,
CAM_ICP_IPE_CMD_UPDATE_CLK,
CAM_ICP_IPE_CMD_DISABLE_CLK,
CAM_ICP_IPE_CMD_RESET,
CAM_ICP_IPE_CMD_MAX,
};
#endif /* CAM_IPE_HW_INTF_H */

View file

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef CAM_ICP_HW_MGR_INTF_H
#define CAM_ICP_HW_MGR_INTF_H
#include <uapi/media/cam_icp.h>
#include <uapi/media/cam_defs.h>
#include <linux/of.h>
#include "cam_cpas_api.h"
#define ICP_CLK_TURBO_HZ 600000000
#define ICP_CLK_SVS_HZ 400000000
#define CAM_ICP_A5_BW_BYTES_VOTE 40000000
#define CAM_ICP_CTX_MAX 36
#define CPAS_IPE1_BIT 0x2000
int cam_icp_hw_mgr_init(struct device_node *of_node,
uint64_t *hw_mgr_hdl, int *iommu_hdl);
/**
* struct cam_icp_cpas_vote
* @ahb_vote: AHB vote info
* @axi_vote: AXI vote info
* @ahb_vote_valid: Flag for ahb vote data
* @axi_vote_valid: flag for axi vote data
*/
struct cam_icp_cpas_vote {
struct cam_ahb_vote ahb_vote;
struct cam_axi_vote axi_vote;
uint32_t ahb_vote_valid;
uint32_t axi_vote_valid;
};
#endif /* CAM_ICP_HW_MGR_INTF_H */

View file

@ -0,0 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw
ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += ipe_dev.o ipe_core.o ipe_soc.o

View file

@ -0,0 +1,390 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/iopoll.h>
#include "cam_io_util.h"
#include "cam_hw.h"
#include "cam_hw_intf.h"
#include "ipe_core.h"
#include "ipe_soc.h"
#include "cam_soc_util.h"
#include "cam_io_util.h"
#include "cam_ipe_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
#include "cam_debug_util.h"
#include "hfi_reg.h"
#define HFI_MAX_POLL_TRY 5
/*
 * cam_ipe_caps_vote - apply AHB and/or AXI votes through CPAS.
 *
 * Fix: previously a failing AHB vote was masked when the subsequent
 * AXI vote succeeded, because rc was overwritten. Now the function
 * returns on the first failure.
 */
static int cam_ipe_caps_vote(struct cam_ipe_device_core_info *core_info,
	struct cam_icp_cpas_vote *cpas_vote)
{
	int rc = 0;

	if (cpas_vote->ahb_vote_valid) {
		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
			&cpas_vote->ahb_vote);
		if (rc) {
			CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
			return rc;
		}
	}

	if (cpas_vote->axi_vote_valid) {
		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
			&cpas_vote->axi_vote);
		if (rc)
			CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
	}

	return rc;
}
/**
 * cam_ipe_init_hw() - power up the IPE core
 * @device_priv: opaque pointer to the IPE's struct cam_hw_info
 * @init_hw_args: unused
 * @arg_size: unused
 *
 * Votes default AHB (SVS) and AXI bandwidth, starts the CPAS client and
 * then enables the IPE SOC resources. If enabling SOC resources fails,
 * the CPAS client is stopped again so state stays consistent.
 *
 * Return: 0 on success, negative errno on failure.
 */
int cam_ipe_init_hw(void *device_priv,
void *init_hw_args, uint32_t arg_size)
{
struct cam_hw_info *ipe_dev = device_priv;
struct cam_hw_soc_info *soc_info = NULL;
struct cam_ipe_device_core_info *core_info = NULL;
struct cam_icp_cpas_vote cpas_vote;
int rc = 0;
if (!device_priv) {
CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
soc_info = &ipe_dev->soc_info;
core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
if ((!soc_info) || (!core_info)) {
CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
soc_info, core_info);
return -EINVAL;
}
/* Default startup votes: SVS AHB level, default AXI bandwidth. */
cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
cpas_vote.axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
cpas_vote.axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
rc = cam_cpas_start(core_info->cpas_handle,
&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
if (rc) {
CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
return rc;
}
core_info->cpas_start = true;
rc = cam_ipe_enable_soc_resources(soc_info);
if (rc) {
CAM_ERR(CAM_ICP, "soc enable is failed : %d", rc);
/* Undo the CPAS start on failure. */
if (cam_cpas_stop(core_info->cpas_handle))
CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
} else {
core_info->clk_enable = true;
}
return rc;
}
/**
 * cam_ipe_deinit_hw() - power down the IPE core
 * @device_priv: opaque pointer to the IPE's struct cam_hw_info
 * @init_hw_args: unused
 * @arg_size: unused
 *
 * Disables the IPE SOC resources (clocks only if they were enabled) and
 * stops the CPAS client if it was started. SOC disable errors are
 * logged but teardown continues regardless.
 *
 * Return: result of the SOC disable step (0 on success).
 */
int cam_ipe_deinit_hw(void *device_priv,
void *init_hw_args, uint32_t arg_size)
{
struct cam_hw_info *ipe_dev = device_priv;
struct cam_hw_soc_info *soc_info = NULL;
struct cam_ipe_device_core_info *core_info = NULL;
int rc = 0;
if (!device_priv) {
CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
soc_info = &ipe_dev->soc_info;
core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
if ((!soc_info) || (!core_info)) {
CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
soc_info, core_info);
return -EINVAL;
}
rc = cam_ipe_disable_soc_resources(soc_info, core_info->clk_enable);
if (rc)
CAM_ERR(CAM_ICP, "soc disable is failed : %d", rc);
core_info->clk_enable = false;
if (core_info->cpas_start) {
if (cam_cpas_stop(core_info->cpas_handle))
CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
return rc;
}
/*
 * cam_ipe_handle_pc - drive the IPE into power collapse.
 *
 * If the CPAS power-control register does not yet show the collapse bit
 * set, this asserts it (writes 0x1) and checks the power-status
 * register; -EINVAL is returned when the core still reports powered on.
 * Finally GDSC control is reclaimed and the registers are logged.
 *
 * NOTE(review): the exact meaning of the pwr_ctrl/pwr_status bit fields
 * comes from hardware documentation not visible here - verify against
 * the CPAS_TOP register spec.
 */
static int cam_ipe_handle_pc(struct cam_hw_info *ipe_dev)
{
struct cam_hw_soc_info *soc_info = NULL;
struct cam_ipe_device_core_info *core_info = NULL;
struct cam_ipe_device_hw_info *hw_info = NULL;
int pwr_ctrl;
int pwr_status;
soc_info = &ipe_dev->soc_info;
core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
hw_info = core_info->ipe_hw_info;
cam_cpas_reg_read(core_info->cpas_handle,
CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
true, &pwr_ctrl);
if (!(pwr_ctrl & IPE_COLLAPSE_MASK)) {
cam_cpas_reg_read(core_info->cpas_handle,
CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
true, &pwr_status);
/* Assert the power-collapse control bit. */
cam_cpas_reg_write(core_info->cpas_handle,
CAM_CPAS_REG_CPASTOP,
hw_info->pwr_ctrl, true, 0x1);
if (pwr_status >> IPE_PWR_ON_MASK)
return -EINVAL;
}
cam_ipe_get_gdsc_control(soc_info);
/* Re-read and log the power registers for debugging. */
cam_cpas_reg_read(core_info->cpas_handle,
CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
true, &pwr_ctrl);
cam_cpas_reg_read(core_info->cpas_handle,
CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
true, &pwr_status);
CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
pwr_ctrl, pwr_status);
return 0;
}
/*
 * cam_ipe_handle_resume - bring the IPE out of power collapse.
 *
 * Clears the collapse bit in the CPAS power-control register if it is
 * set, then transfers GDSC control to hardware and logs the resulting
 * power registers.
 *
 * Return: result of cam_ipe_transfer_gdsc_control().
 */
static int cam_ipe_handle_resume(struct cam_hw_info *ipe_dev)
{
struct cam_hw_soc_info *soc_info = NULL;
struct cam_ipe_device_core_info *core_info = NULL;
struct cam_ipe_device_hw_info *hw_info = NULL;
int pwr_ctrl;
int pwr_status;
int rc = 0;
soc_info = &ipe_dev->soc_info;
core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
hw_info = core_info->ipe_hw_info;
cam_cpas_reg_read(core_info->cpas_handle,
CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
true, &pwr_ctrl);
if (pwr_ctrl & IPE_COLLAPSE_MASK) {
CAM_DBG(CAM_ICP, "IPE pwr_ctrl set(%x)", pwr_ctrl);
/* De-assert the power-collapse control bit. */
cam_cpas_reg_write(core_info->cpas_handle,
CAM_CPAS_REG_CPASTOP,
hw_info->pwr_ctrl, true, 0);
}
rc = cam_ipe_transfer_gdsc_control(soc_info);
/* Log the power registers for debugging. */
cam_cpas_reg_read(core_info->cpas_handle,
CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
cam_cpas_reg_read(core_info->cpas_handle,
CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
true, &pwr_status);
CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
pwr_ctrl, pwr_status);
return rc;
}
/**
 * cam_ipe_cmd_reset() - reset the IPE CDM and TOP blocks
 * @soc_info: IPE SOC info (register mappings)
 * @core_info: IPE core info (CPAS handle, hw register offsets)
 *
 * Issues a CDM core reset followed by a TOP reset, polling the
 * respective IRQ status registers for the reset-done bit with up to
 * HFI_MAX_POLL_TRY polling rounds each. Afterwards GDSC control is
 * reclaimed and the power registers are logged.
 *
 * Fix: retry_cnt is reset to 0 before the TOP poll loop; previously a
 * fully-consumed CDM retry budget meant the TOP loop never polled.
 *
 * Return: 0 on success, -EAGAIN if either reset did not complete.
 */
static int cam_ipe_cmd_reset(struct cam_hw_soc_info *soc_info,
	struct cam_ipe_device_core_info *core_info)
{
	int pwr_ctrl, pwr_status, rc = 0;
	uint32_t status = 0, retry_cnt = 0;
	bool reset_ipe_cdm_fail = false;
	bool reset_ipe_top_fail = false;

	CAM_DBG(CAM_ICP, "CAM_ICP_IPE_CMD_RESET");

	/* IPE CDM core reset */
	cam_io_w_mb((uint32_t)0xF,
		soc_info->reg_map[0].mem_base + IPE_CDM_RST_CMD);
	while (retry_cnt < HFI_MAX_POLL_TRY) {
		readw_poll_timeout((soc_info->reg_map[0].mem_base +
			IPE_CDM_IRQ_STATUS),
			status, ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1),
			100, 10000);
		CAM_DBG(CAM_HFI, "ipe_cdm_irq_status = %u", status);
		if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1)
			break;
		retry_cnt++;
	}
	status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		IPE_CDM_IRQ_STATUS);
	if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
		CAM_ERR(CAM_ICP, "IPE CDM rst failed status 0x%x", status);
		reset_ipe_cdm_fail = true;
	}

	/* IPE top reset */
	status = 0;
	retry_cnt = 0;
	cam_io_w_mb((uint32_t)0x3,
		soc_info->reg_map[0].mem_base + IPE_TOP_RST_CMD);
	while (retry_cnt < HFI_MAX_POLL_TRY) {
		readw_poll_timeout((soc_info->reg_map[0].mem_base +
			IPE_TOP_IRQ_STATUS),
			status, ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1),
			100, 10000);
		CAM_DBG(CAM_HFI, "ipe_top_irq_status = %u", status);
		if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1)
			break;
		retry_cnt++;
	}
	status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		IPE_TOP_IRQ_STATUS);
	if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
		CAM_ERR(CAM_ICP, "IPE top rst failed status 0x%x", status);
		reset_ipe_top_fail = true;
	}

	/* Reclaim GDSC control and log the power state for debugging. */
	cam_ipe_get_gdsc_control(soc_info);
	cam_cpas_reg_read(core_info->cpas_handle,
		CAM_CPAS_REG_CPASTOP, core_info->ipe_hw_info->pwr_ctrl,
		true, &pwr_ctrl);
	cam_cpas_reg_read(core_info->cpas_handle,
		CAM_CPAS_REG_CPASTOP, core_info->ipe_hw_info->pwr_status,
		true, &pwr_status);
	CAM_DBG(CAM_ICP, "(After)pwr_ctrl = %x pwr_status = %x",
		pwr_ctrl, pwr_status);

	if (reset_ipe_cdm_fail || reset_ipe_top_fail)
		rc = -EAGAIN;

	return rc;
}
/**
 * cam_ipe_process_cmd() - dispatch an IPE hardware-manager command
 * @device_priv: opaque pointer to the IPE's struct cam_hw_info
 * @cmd_type: one of enum cam_icp_ipe_cmd_type
 * @cmd_args: command-specific payload (may be required, see cases)
 * @arg_size: unused
 *
 * Handles CPAS voting/start/stop, power collapse/resume, clock update,
 * clock disable and block reset for the IPE core.
 *
 * Fix: the CAM_ICP_IPE_CMD_UPDATE_CLK resume-failure log said "bps
 * resume failed" (copy-paste from the BPS driver); corrected to "ipe".
 *
 * Return: 0 on success, negative errno on failure.
 */
int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type,
	void *cmd_args, uint32_t arg_size)
{
	struct cam_hw_info *ipe_dev = device_priv;
	struct cam_hw_soc_info *soc_info = NULL;
	struct cam_ipe_device_core_info *core_info = NULL;
	struct cam_ipe_device_hw_info *hw_info = NULL;
	int rc = 0;

	if (!device_priv) {
		CAM_ERR(CAM_ICP, "Invalid arguments");
		return -EINVAL;
	}

	if (cmd_type >= CAM_ICP_IPE_CMD_MAX) {
		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
		return -EINVAL;
	}

	soc_info = &ipe_dev->soc_info;
	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
	hw_info = core_info->ipe_hw_info;

	switch (cmd_type) {
	case CAM_ICP_IPE_CMD_VOTE_CPAS: {
		struct cam_icp_cpas_vote *cpas_vote = cmd_args;

		if (!cmd_args)
			return -EINVAL;

		cam_ipe_caps_vote(core_info, cpas_vote);
		break;
	}
	case CAM_ICP_IPE_CMD_CPAS_START: {
		struct cam_icp_cpas_vote *cpas_vote = cmd_args;

		if (!cmd_args)
			return -EINVAL;

		if (!core_info->cpas_start) {
			rc = cam_cpas_start(core_info->cpas_handle,
				&cpas_vote->ahb_vote, &cpas_vote->axi_vote);
			core_info->cpas_start = true;
		}
		break;
	}
	case CAM_ICP_IPE_CMD_CPAS_STOP:
		if (core_info->cpas_start) {
			cam_cpas_stop(core_info->cpas_handle);
			core_info->cpas_start = false;
		}
		break;
	case CAM_ICP_IPE_CMD_POWER_COLLAPSE:
		rc = cam_ipe_handle_pc(ipe_dev);
		break;
	case CAM_ICP_IPE_CMD_POWER_RESUME:
		rc = cam_ipe_handle_resume(ipe_dev);
		break;
	case CAM_ICP_IPE_CMD_UPDATE_CLK: {
		struct cam_a5_clk_update_cmd *clk_upd_cmd =
			(struct cam_a5_clk_update_cmd *)cmd_args;
		uint32_t clk_rate = clk_upd_cmd->curr_clk_rate;

		CAM_DBG(CAM_ICP, "ipe_src_clk rate = %d", (int)clk_rate);
		if (!core_info->clk_enable) {
			/* Exit power collapse before touching clocks. */
			if (clk_upd_cmd->ipe_bps_pc_enable) {
				cam_ipe_handle_pc(ipe_dev);
				cam_cpas_reg_write(core_info->cpas_handle,
					CAM_CPAS_REG_CPASTOP,
					hw_info->pwr_ctrl, true, 0x0);
			}
			rc = cam_ipe_toggle_clk(soc_info, true);
			if (rc)
				CAM_ERR(CAM_ICP, "Enable failed");
			else
				core_info->clk_enable = true;
			if (clk_upd_cmd->ipe_bps_pc_enable) {
				rc = cam_ipe_handle_resume(ipe_dev);
				if (rc)
					CAM_ERR(CAM_ICP, "ipe resume failed");
			}
		}
		CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
		rc = cam_ipe_update_clk_rate(soc_info, clk_rate);
		if (rc)
			CAM_ERR(CAM_ICP, "Failed to update clk");
	}
		break;
	case CAM_ICP_IPE_CMD_DISABLE_CLK:
		if (core_info->clk_enable == true)
			cam_ipe_toggle_clk(soc_info, false);
		core_info->clk_enable = false;
		break;
	case CAM_ICP_IPE_CMD_RESET:
		rc = cam_ipe_cmd_reset(soc_info, core_info);
		break;
	default:
		CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type);
		rc = -EINVAL;
		break;
	}
	return rc;
}
/*
 * cam_ipe_irq - IPE interrupt handler. Currently a no-op that just
 * acknowledges the interrupt; no IPE-side servicing is done here.
 */
irqreturn_t cam_ipe_irq(int irq_num, void *data)
{
return IRQ_HANDLED;
}

View file

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef CAM_IPE_CORE_H
#define CAM_IPE_CORE_H
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-buf.h>
#define IPE_COLLAPSE_MASK 0x1
#define IPE_PWR_ON_MASK 0x2
struct cam_ipe_device_hw_info {
uint32_t hw_idx;
uint32_t pwr_ctrl;
uint32_t pwr_status;
uint32_t reserved;
};
struct cam_ipe_device_core_info {
struct cam_ipe_device_hw_info *ipe_hw_info;
uint32_t cpas_handle;
bool cpas_start;
bool clk_enable;
};
int cam_ipe_init_hw(void *device_priv,
void *init_hw_args, uint32_t arg_size);
int cam_ipe_deinit_hw(void *device_priv,
void *init_hw_args, uint32_t arg_size);
int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type,
void *cmd_args, uint32_t arg_size);
irqreturn_t cam_ipe_irq(int irq_num, void *data);
#endif /* CAM_IPE_CORE_H */

View file

@ -0,0 +1,199 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/timer.h>
#include "ipe_core.h"
#include "ipe_soc.h"
#include "cam_hw.h"
#include "cam_hw_intf.h"
#include "cam_io_util.h"
#include "cam_icp_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
#include "cam_debug_util.h"
/* Per-instance CPAS_TOP power control/status register offsets for the
 * two possible IPE cores (indexed by cell-index from device tree).
 */
static struct cam_ipe_device_hw_info cam_ipe_hw_info[] = {
{
.hw_idx = 0,
.pwr_ctrl = 0x4c,
.pwr_status = 0x48,
.reserved = 0,
},
{
.hw_idx = 1,
.pwr_ctrl = 0x54,
.pwr_status = 0x50,
.reserved = 0,
},
};
EXPORT_SYMBOL(cam_ipe_hw_info);
/* Device-name buffer filled in at probe time ("ipe0"/"ipe1"). */
static char ipe_dev_name[8];
/*
 * cam_ipe_register_cpas - register this IPE core as a CPAS client under
 * the "ipe" identifier and cache the returned client handle in
 * @core_info. Returns 0 on success, negative errno on failure.
 */
int cam_ipe_register_cpas(struct cam_hw_soc_info *soc_info,
	struct cam_ipe_device_core_info *core_info,
	uint32_t hw_idx)
{
	struct cam_cpas_register_params reg_params;
	int rc;

	reg_params.dev = &soc_info->pdev->dev;
	memcpy(reg_params.identifier, "ipe", sizeof("ipe"));
	reg_params.cam_cpas_client_cb = NULL;
	reg_params.cell_index = hw_idx;
	reg_params.userdata = NULL;

	rc = cam_cpas_register_client(&reg_params);
	if (rc < 0) {
		CAM_ERR(CAM_ICP, "failed: %d", rc);
		return rc;
	}

	core_info->cpas_handle = reg_params.client_handle;
	return rc;
}
int cam_ipe_probe(struct platform_device *pdev)
{
struct cam_hw_info *ipe_dev = NULL;
struct cam_hw_intf *ipe_dev_intf = NULL;
const struct of_device_id *match_dev = NULL;
struct cam_ipe_device_core_info *core_info = NULL;
struct cam_ipe_device_hw_info *hw_info = NULL;
int rc = 0;
struct cam_cpas_query_cap query;
uint32_t cam_caps;
uint32_t hw_idx;
of_property_read_u32(pdev->dev.of_node,
"cell-index", &hw_idx);
cam_cpas_get_hw_info(&query.camera_family,
&query.camera_version, &query.cpas_version, &cam_caps);
if ((!(cam_caps & CPAS_IPE1_BIT)) && (hw_idx)) {
CAM_ERR(CAM_ICP, "IPE1 hw idx = %d\n", hw_idx);
return -EINVAL;
}
ipe_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
if (!ipe_dev_intf)
return -ENOMEM;
ipe_dev_intf->hw_idx = hw_idx;
ipe_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
if (!ipe_dev) {
kfree(ipe_dev_intf);
return -ENOMEM;
}
memset(ipe_dev_name, 0, sizeof(ipe_dev_name));
snprintf(ipe_dev_name, sizeof(ipe_dev_name),
"ipe%1u", ipe_dev_intf->hw_idx);
ipe_dev->soc_info.pdev = pdev;
ipe_dev->soc_info.dev = &pdev->dev;
ipe_dev->soc_info.dev_name = ipe_dev_name;
ipe_dev_intf->hw_priv = ipe_dev;
ipe_dev_intf->hw_ops.init = cam_ipe_init_hw;
ipe_dev_intf->hw_ops.deinit = cam_ipe_deinit_hw;
ipe_dev_intf->hw_ops.process_cmd = cam_ipe_process_cmd;
ipe_dev_intf->hw_type = CAM_ICP_DEV_IPE;
CAM_DBG(CAM_ICP, "type %d index %d",
ipe_dev_intf->hw_type,
ipe_dev_intf->hw_idx);
platform_set_drvdata(pdev, ipe_dev_intf);
ipe_dev->core_info = kzalloc(sizeof(struct cam_ipe_device_core_info),
GFP_KERNEL);
if (!ipe_dev->core_info) {
kfree(ipe_dev);
kfree(ipe_dev_intf);
return -ENOMEM;
}
core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev) {
CAM_DBG(CAM_ICP, "No ipe hardware info");
kfree(ipe_dev->core_info);
kfree(ipe_dev);
kfree(ipe_dev_intf);
rc = -EINVAL;
return rc;
}
hw_info = &cam_ipe_hw_info[ipe_dev_intf->hw_idx];
core_info->ipe_hw_info = hw_info;
rc = cam_ipe_init_soc_resources(&ipe_dev->soc_info, cam_ipe_irq,
ipe_dev);
if (rc < 0) {
CAM_ERR(CAM_ICP, "failed to init_soc");
kfree(ipe_dev->core_info);
kfree(ipe_dev);
kfree(ipe_dev_intf);
return rc;
}
CAM_DBG(CAM_ICP, "cam_ipe_init_soc_resources : %pK",
(void *)&ipe_dev->soc_info);
rc = cam_ipe_register_cpas(&ipe_dev->soc_info,
core_info, ipe_dev_intf->hw_idx);
if (rc < 0) {
kfree(ipe_dev->core_info);
kfree(ipe_dev);
kfree(ipe_dev_intf);
return rc;
}
ipe_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
mutex_init(&ipe_dev->hw_mutex);
spin_lock_init(&ipe_dev->hw_lock);
init_completion(&ipe_dev->hw_complete);
CAM_DBG(CAM_ICP, "IPE%d probe successful",
ipe_dev_intf->hw_idx);
return rc;
}
/* DT compatible strings this driver binds to; data points at the
 * per-instance register table (selected by cell-index at probe).
 */
static const struct of_device_id cam_ipe_dt_match[] = {
{
.compatible = "qcom,cam-ipe",
.data = &cam_ipe_hw_info,
},
{}
};
MODULE_DEVICE_TABLE(of, cam_ipe_dt_match);
/* Platform driver; bind attrs suppressed since unbinding a camera HW
 * block at runtime is not supported.
 */
static struct platform_driver cam_ipe_driver = {
.probe = cam_ipe_probe,
.driver = {
.name = "cam-ipe",
.owner = THIS_MODULE,
.of_match_table = cam_ipe_dt_match,
.suppress_bind_attrs = true,
},
};
/*
 * module_platform_driver() generates the __init/__exit pair that
 * registers/unregisters cam_ipe_driver — identical behavior to the
 * open-coded module_init/module_exit boilerplate it replaces.
 */
module_platform_driver(cam_ipe_driver);

MODULE_DESCRIPTION("CAM IPE driver");
MODULE_LICENSE("GPL v2");

View file

@ -0,0 +1,166 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-buf.h>
#include <media/cam_defs.h>
#include <media/cam_icp.h>
#include "ipe_soc.h"
#include "cam_soc_util.h"
#include "cam_debug_util.h"
/**
 * cam_ipe_transfer_gdsc_control() - hand GDSC control to the IPE core
 * @soc_info: SOC info holding the regulator array.
 *
 * Switches every regulator to FAST mode.  If any switch fails, the
 * regulators already switched are rolled back to NORMAL mode.
 *
 * Return: 0 on success, or the failing regulator_set_mode() code.
 */
int cam_ipe_transfer_gdsc_control(struct cam_hw_soc_info *soc_info)
{
	int idx;
	int rc;

	for (idx = 0; idx < soc_info->num_rgltr; idx++) {
		rc = regulator_set_mode(soc_info->rgltr[idx],
			REGULATOR_MODE_FAST);
		if (!rc)
			continue;

		CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
			soc_info->rgltr_name[idx]);
		/* Roll back the regulators already moved to FAST. */
		while (--idx >= 0)
			if (soc_info->rgltr[idx])
				regulator_set_mode(soc_info->rgltr[idx],
					REGULATOR_MODE_NORMAL);
		return rc;
	}

	return 0;
}
/**
 * cam_ipe_get_gdsc_control() - reclaim GDSC control from the IPE core
 * @soc_info: SOC info holding the regulator array.
 *
 * Switches every regulator back to NORMAL mode.  If any switch fails,
 * the regulators already switched are restored to FAST mode.
 *
 * Return: 0 on success, or the failing regulator_set_mode() code.
 */
int cam_ipe_get_gdsc_control(struct cam_hw_soc_info *soc_info)
{
	int idx;
	int rc;

	for (idx = 0; idx < soc_info->num_rgltr; idx++) {
		rc = regulator_set_mode(soc_info->rgltr[idx],
			REGULATOR_MODE_NORMAL);
		if (!rc)
			continue;

		CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
			soc_info->rgltr_name[idx]);
		/* Roll back the regulators already moved to NORMAL. */
		while (--idx >= 0)
			if (soc_info->rgltr[idx])
				regulator_set_mode(soc_info->rgltr[idx],
					REGULATOR_MODE_FAST);
		return rc;
	}

	return 0;
}
/* Read this IPE instance's DT properties into soc_info; log on error. */
static int cam_ipe_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
	int rc = cam_soc_util_get_dt_properties(soc_info);

	if (rc < 0)
		CAM_ERR(CAM_ICP, "get ipe dt prop is failed");

	return rc;
}
/* Claim clocks/regulators/IRQ for the IPE and hook its IRQ handler. */
static int cam_ipe_request_platform_resource(
	struct cam_hw_soc_info *soc_info,
	irq_handler_t ipe_irq_handler, void *irq_data)
{
	return cam_soc_util_request_platform_resource(soc_info,
		ipe_irq_handler, irq_data);
}
/**
 * cam_ipe_init_soc_resources() - parse DT and claim platform resources
 * @soc_info:        SOC info to populate.
 * @ipe_irq_handler: IRQ handler to register for this instance.
 * @irq_data:        Cookie passed to the IRQ handler.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int cam_ipe_init_soc_resources(struct cam_hw_soc_info *soc_info,
	irq_handler_t ipe_irq_handler, void *irq_data)
{
	int rc;

	rc = cam_ipe_get_dt_properties(soc_info);
	if (rc < 0)
		return rc;

	return cam_ipe_request_platform_resource(soc_info, ipe_irq_handler,
		irq_data);
}
/* Power on the IPE's clocks/regulators at the SVS vote level. */
int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
	int rc = cam_soc_util_enable_platform_resource(soc_info, true,
		CAM_SVS_VOTE, false);

	if (rc)
		CAM_ERR(CAM_ICP, "enable platform failed");

	return rc;
}
/**
 * cam_ipe_disable_soc_resources() - power down IPE platform resources
 * @soc_info:    SOC info holding the resources.
 * @disable_clk: true to also gate the clocks.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info,
	bool disable_clk)
{
	int rc = 0;

	rc = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
		false);
	if (rc)
		/* Fixed copy-paste: this is the disable path. */
		CAM_ERR(CAM_ICP, "disable platform failed");

	return rc;
}
/**
 * cam_ipe_update_clk_rate() - set the IPE source clock rate
 * @soc_info: SOC info holding clock tables.
 * @clk_rate: Requested rate in Hz; clamped to the TURBO-level rate of
 *            the source clock when a valid TURBO entry exists.
 *
 * Return: 0 on success, -EINVAL on NULL soc_info, or the error from
 * cam_soc_util_set_src_clk_rate().
 */
int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info,
	uint32_t clk_rate)
{
	int32_t src_clk_idx;

	if (!soc_info)
		return -EINVAL;

	src_clk_idx = soc_info->src_clk_idx;

	/* Clamp to the TURBO-level max when the table provides one. */
	if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
		(soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
		(clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
		/* %u: clk_rate is unsigned (was mismatched %d). */
		CAM_DBG(CAM_ICP, "clk_rate %u greater than max, reset to %d",
			clk_rate,
			soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
		clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
	}

	return cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
}
/* Gate or ungate the IPE clocks at the default (SVS) level. */
int cam_ipe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
{
	if (clk_enable)
		return cam_soc_util_clk_enable_default(soc_info,
			CAM_SVS_VOTE);

	/* Disable path reports no error, matching the util's API. */
	cam_soc_util_clk_disable_default(soc_info);
	return 0;
}

Some files were not shown because too many files have changed in this diff Show more