1. Packages
  2. Ionoscloud
  3. API Docs
  4. kafka
  5. Topic
IonosCloud v0.2.3 published on Tuesday, May 13, 2025 by ionos-cloud

ionoscloud.kafka.Topic

Explore with Pulumi AI

ionoscloud logo
IonosCloud v0.2.3 published on Tuesday, May 13, 2025 by ionos-cloud

    Manages a Kafka Cluster Topic on IonosCloud.

    Example Usage

    This resource will create an operational Kafka Cluster Topic. After this section completes, the provisioner can be called.

    import * as pulumi from "@pulumi/pulumi";
    import * as ionoscloud from "@ionos-cloud/sdk-pulumi";
    
    // Basic example
    const example = new ionoscloud.compute.Datacenter("example", {
        name: "example-kafka-datacenter",
        location: "de/fra",
    });
    const exampleLan = new ionoscloud.compute.Lan("example", {
        datacenterId: example.id,
        "public": false,
        name: "example-kafka-lan",
    });
    const exampleCluster = new ionoscloud.kafka.Cluster("example", {
        name: "example-kafka-cluster",
        location: example.location,
        version: "3.7.0",
        size: "S",
        connections: {
            datacenterId: example.id,
            lanId: exampleLan.id,
            brokerAddresses: [
                "192.168.1.101/24",
                "192.168.1.102/24",
                "192.168.1.103/24",
            ],
        },
    });
    const exampleTopic = new ionoscloud.kafka.Topic("example", {
        clusterId: exampleCluster.id,
        name: "kafka-cluster-topic",
        location: exampleCluster.location,
        replicationFactor: 1,
        numberOfPartitions: 1,
        retentionTime: 86400000,
        segmentBytes: 1073741824,
    });
    
    import pulumi
    import pulumi_ionoscloud as ionoscloud
    
    # Basic example
    example = ionoscloud.compute.Datacenter("example",
        name="example-kafka-datacenter",
        location="de/fra")
    example_lan = ionoscloud.compute.Lan("example",
        datacenter_id=example.id,
        public=False,
        name="example-kafka-lan")
    example_cluster = ionoscloud.kafka.Cluster("example",
        name="example-kafka-cluster",
        location=example.location,
        version="3.7.0",
        size="S",
        connections={
            "datacenter_id": example.id,
            "lan_id": example_lan.id,
            "broker_addresses": [
                "192.168.1.101/24",
                "192.168.1.102/24",
                "192.168.1.103/24",
            ],
        })
    example_topic = ionoscloud.kafka.Topic("example",
        cluster_id=example_cluster.id,
        name="kafka-cluster-topic",
        location=example_cluster.location,
        replication_factor=1,
        number_of_partitions=1,
        retention_time=86400000,
        segment_bytes=1073741824)
    
    package main
    
    import (
    	"github.com/ionos-cloud/pulumi-ionoscloud/sdk/go/ionoscloud/compute"
    	"github.com/ionos-cloud/pulumi-ionoscloud/sdk/go/ionoscloud/kafka"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Basic example
    		example, err := compute.NewDatacenter(ctx, "example", &compute.DatacenterArgs{
    			Name:     pulumi.String("example-kafka-datacenter"),
    			Location: pulumi.String("de/fra"),
    		})
    		if err != nil {
    			return err
    		}
    		exampleLan, err := compute.NewLan(ctx, "example", &compute.LanArgs{
    			DatacenterId: example.ID(),
    			Public:       pulumi.Bool(false),
    			Name:         pulumi.String("example-kafka-lan"),
    		})
    		if err != nil {
    			return err
    		}
    		exampleCluster, err := kafka.NewCluster(ctx, "example", &kafka.ClusterArgs{
    			Name:     pulumi.String("example-kafka-cluster"),
    			Location: example.Location,
    			Version:  pulumi.String("3.7.0"),
    			Size:     pulumi.String("S"),
    			Connections: &kafka.ClusterConnectionsArgs{
    				DatacenterId: example.ID(),
    				LanId:        exampleLan.ID(),
    				BrokerAddresses: pulumi.StringArray{
    					pulumi.String("192.168.1.101/24"),
    					pulumi.String("192.168.1.102/24"),
    					pulumi.String("192.168.1.103/24"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = kafka.NewTopic(ctx, "example", &kafka.TopicArgs{
    			ClusterId:          exampleCluster.ID(),
    			Name:               pulumi.String("kafka-cluster-topic"),
    			Location:           exampleCluster.Location,
    			ReplicationFactor:  pulumi.Int(1),
    			NumberOfPartitions: pulumi.Int(1),
    			RetentionTime:      pulumi.Int(86400000),
    			SegmentBytes:       pulumi.Int(1073741824),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Ionoscloud = Ionoscloud.Pulumi.Ionoscloud;
    
    return await Deployment.RunAsync(() => 
    {
        // Basic example
        var example = new Ionoscloud.Compute.Datacenter("example", new()
        {
            Name = "example-kafka-datacenter",
            Location = "de/fra",
        });
    
        var exampleLan = new Ionoscloud.Compute.Lan("example", new()
        {
            DatacenterId = example.Id,
            Public = false,
            Name = "example-kafka-lan",
        });
    
        var exampleCluster = new Ionoscloud.Kafka.Cluster("example", new()
        {
            Name = "example-kafka-cluster",
            Location = example.Location,
            Version = "3.7.0",
            Size = "S",
            Connections = new Ionoscloud.Kafka.Inputs.ClusterConnectionsArgs
            {
                DatacenterId = example.Id,
                LanId = exampleLan.Id,
                BrokerAddresses = new[]
                {
                    "192.168.1.101/24",
                    "192.168.1.102/24",
                    "192.168.1.103/24",
                },
            },
        });
    
        var exampleTopic = new Ionoscloud.Kafka.Topic("example", new()
        {
            ClusterId = exampleCluster.Id,
            Name = "kafka-cluster-topic",
            Location = exampleCluster.Location,
            ReplicationFactor = 1,
            NumberOfPartitions = 1,
            RetentionTime = 86400000,
            SegmentBytes = 1073741824,
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.ionoscloud.compute.Datacenter;
    import com.pulumi.ionoscloud.compute.DatacenterArgs;
    import com.pulumi.ionoscloud.compute.Lan;
    import com.pulumi.ionoscloud.compute.LanArgs;
    import com.pulumi.ionoscloud.kafka.Cluster;
    import com.pulumi.ionoscloud.kafka.ClusterArgs;
    import com.pulumi.ionoscloud.kafka.inputs.ClusterConnectionsArgs;
    import com.pulumi.ionoscloud.kafka.Topic;
    import com.pulumi.ionoscloud.kafka.TopicArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Basic example
            var example = new Datacenter("example", DatacenterArgs.builder()
                .name("example-kafka-datacenter")
                .location("de/fra")
                .build());
    
            var exampleLan = new Lan("exampleLan", LanArgs.builder()
                .datacenterId(example.id())
                .public_(false)
                .name("example-kafka-lan")
                .build());
    
            var exampleCluster = new Cluster("exampleCluster", ClusterArgs.builder()
                .name("example-kafka-cluster")
                .location(example.location())
                .version("3.7.0")
                .size("S")
                .connections(ClusterConnectionsArgs.builder()
                    .datacenterId(example.id())
                    .lanId(exampleLan.id())
                    .brokerAddresses(                
                        "192.168.1.101/24",
                        "192.168.1.102/24",
                        "192.168.1.103/24")
                    .build())
                .build());
    
            var exampleTopic = new Topic("exampleTopic", TopicArgs.builder()
                .clusterId(exampleCluster.id())
                .name("kafka-cluster-topic")
                .location(exampleCluster.location())
                .replicationFactor(1)
                .numberOfPartitions(1)
                .retentionTime(86400000)
                .segmentBytes(1073741824)
                .build());
    
        }
    }
    
    resources:
      # Basic example
      example:
        type: ionoscloud:compute:Datacenter
        properties:
          name: example-kafka-datacenter
          location: de/fra
      exampleLan:
        type: ionoscloud:compute:Lan
        name: example
        properties:
          datacenterId: ${example.id}
          public: false
          name: example-kafka-lan
      exampleCluster:
        type: ionoscloud:kafka:Cluster
        name: example
        properties:
          name: example-kafka-cluster
          location: ${example.location}
          version: 3.7.0
          size: S
          connections:
            datacenterId: ${example.id}
            lanId: ${exampleLan.id}
            brokerAddresses:
              - 192.168.1.101/24
              - 192.168.1.102/24
              - 192.168.1.103/24
      exampleTopic:
        type: ionoscloud:kafka:Topic
        name: example
        properties:
          clusterId: ${exampleCluster.id}
          name: kafka-cluster-topic
          location: ${exampleCluster.location}
          replicationFactor: 1
          numberOfPartitions: 1
          retentionTime: 86400000
          segmentBytes: 1073741824
    
    import * as pulumi from "@pulumi/pulumi";
    import * as ionoscloud from "@ionos-cloud/sdk-pulumi";
    import * as random from "@pulumi/random";
    
    // Complete example
    const example = new ionoscloud.compute.Datacenter("example", {
        name: "example-kafka-datacenter",
        location: "de/fra",
    });
    const exampleLan = new ionoscloud.compute.Lan("example", {
        datacenterId: example.id,
        "public": false,
        name: "example-kafka-lan",
    });
    const password = new random.index.Password("password", {
        length: 16,
        special: false,
    });
    const exampleServer = new ionoscloud.compute.Server("example", {
        name: "example-kafka-server",
        datacenterId: example.id,
        cores: 1,
        ram: 2 * 1024,
        availabilityZone: "AUTO",
        cpuFamily: "INTEL_SKYLAKE",
        imageName: "ubuntu:latest",
        imagePassword: password.result,
        volume: {
            name: "example-kafka-volume",
            size: 6,
            diskType: "SSD Standard",
        },
        nic: {
            lan: exampleLan.id,
            name: "example-kafka-nic",
            dhcp: true,
        },
    });
    const exampleCluster = new ionoscloud.kafka.Cluster("example", {
        name: "example-kafka-cluster",
        location: example.location,
        version: "3.7.0",
        size: "S",
        connections: {
            datacenterId: example.id,
            lanId: exampleLan.id,
            brokerAddresses: "kafka_cluster_broker_ips_cidr_list",
        },
    });
    const exampleTopic = new ionoscloud.kafka.Topic("example", {
        clusterId: exampleCluster.id,
        name: "kafka-cluster-topic",
        location: exampleCluster.location,
        replicationFactor: 1,
        numberOfPartitions: 1,
        retentionTime: 86400000,
        segmentBytes: 1073741824,
    });
    
    import pulumi
    import pulumi_ionoscloud as ionoscloud
    import pulumi_random as random
    
    # Complete example
    example = ionoscloud.compute.Datacenter("example",
        name="example-kafka-datacenter",
        location="de/fra")
    example_lan = ionoscloud.compute.Lan("example",
        datacenter_id=example.id,
        public=False,
        name="example-kafka-lan")
    password = random.index.Password("password",
        length=16,
        special=False)
    example_server = ionoscloud.compute.Server("example",
        name="example-kafka-server",
        datacenter_id=example.id,
        cores=1,
        ram=2 * 1024,
        availability_zone="AUTO",
        cpu_family="INTEL_SKYLAKE",
        image_name="ubuntu:latest",
        image_password=password["result"],
        volume={
            "name": "example-kafka-volume",
            "size": 6,
            "disk_type": "SSD Standard",
        },
        nic={
            "lan": example_lan.id,
            "name": "example-kafka-nic",
            "dhcp": True,
        })
    example_cluster = ionoscloud.kafka.Cluster("example",
        name="example-kafka-cluster",
        location=example.location,
        version="3.7.0",
        size="S",
        connections={
            "datacenter_id": example.id,
            "lan_id": example_lan.id,
            "broker_addresses": "kafka_cluster_broker_ips_cidr_list",
        })
    example_topic = ionoscloud.kafka.Topic("example",
        cluster_id=example_cluster.id,
        name="kafka-cluster-topic",
        location=example_cluster.location,
        replication_factor=1,
        number_of_partitions=1,
        retention_time=86400000,
        segment_bytes=1073741824)
    
    package main
    
    import (
    	"github.com/ionos-cloud/pulumi-ionoscloud/sdk/go/ionoscloud/compute"
    	"github.com/ionos-cloud/pulumi-ionoscloud/sdk/go/ionoscloud/kafka"
    	"github.com/pulumi/pulumi-random/sdk/go/random"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		// Complete example
    		example, err := compute.NewDatacenter(ctx, "example", &compute.DatacenterArgs{
    			Name:     pulumi.String("example-kafka-datacenter"),
    			Location: pulumi.String("de/fra"),
    		})
    		if err != nil {
    			return err
    		}
    		exampleLan, err := compute.NewLan(ctx, "example", &compute.LanArgs{
    			DatacenterId: example.ID(),
    			Public:       pulumi.Bool(false),
    			Name:         pulumi.String("example-kafka-lan"),
    		})
    		if err != nil {
    			return err
    		}
    		password, err := random.NewPassword(ctx, "password", &random.PasswordArgs{
    			Length:  pulumi.Int(16),
    			Special: pulumi.Bool(false),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = compute.NewServer(ctx, "example", &compute.ServerArgs{
    			Name:             pulumi.String("example-kafka-server"),
    			DatacenterId:     example.ID(),
    			Cores:            pulumi.Int(1),
    			Ram:              pulumi.Int(2 * 1024),
    			AvailabilityZone: pulumi.String("AUTO"),
    			CpuFamily:        pulumi.String("INTEL_SKYLAKE"),
    			ImageName:        pulumi.String("ubuntu:latest"),
    			ImagePassword:    password.Result,
    			Volume: &compute.ServerVolumeArgs{
    				Name:     pulumi.String("example-kafka-volume"),
    				Size:     pulumi.Int(6),
    				DiskType: pulumi.String("SSD Standard"),
    			},
    			Nic: &compute.ServerNicArgs{
    				Lan:  exampleLan.ID(),
    				Name: pulumi.String("example-kafka-nic"),
    				Dhcp: pulumi.Bool(true),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		exampleCluster, err := kafka.NewCluster(ctx, "example", &kafka.ClusterArgs{
    			Name:     pulumi.String("example-kafka-cluster"),
    			Location: example.Location,
    			Version:  pulumi.String("3.7.0"),
    			Size:     pulumi.String("S"),
    			Connections: &kafka.ClusterConnectionsArgs{
    				DatacenterId:    example.ID(),
    				LanId:           exampleLan.ID(),
    				BrokerAddresses: pulumi.StringArray("kafka_cluster_broker_ips_cidr_list"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = kafka.NewTopic(ctx, "example", &kafka.TopicArgs{
    			ClusterId:          exampleCluster.ID(),
    			Name:               pulumi.String("kafka-cluster-topic"),
    			Location:           exampleCluster.Location,
    			ReplicationFactor:  pulumi.Int(1),
    			NumberOfPartitions: pulumi.Int(1),
    			RetentionTime:      pulumi.Int(86400000),
    			SegmentBytes:       pulumi.Int(1073741824),
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Ionoscloud = Ionoscloud.Pulumi.Ionoscloud;
    using Random = Pulumi.Random;
    
    return await Deployment.RunAsync(() => 
    {
        // Complete example
        var example = new Ionoscloud.Compute.Datacenter("example", new()
        {
            Name = "example-kafka-datacenter",
            Location = "de/fra",
        });
    
        var exampleLan = new Ionoscloud.Compute.Lan("example", new()
        {
            DatacenterId = example.Id,
            Public = false,
            Name = "example-kafka-lan",
        });
    
        var password = new Random.Index.Password("password", new()
        {
            Length = 16,
            Special = false,
        });
    
        var exampleServer = new Ionoscloud.Compute.Server("example", new()
        {
            Name = "example-kafka-server",
            DatacenterId = example.Id,
            Cores = 1,
            Ram = 2 * 1024,
            AvailabilityZone = "AUTO",
            CpuFamily = "INTEL_SKYLAKE",
            ImageName = "ubuntu:latest",
            ImagePassword = password.Result,
            Volume = new Ionoscloud.Compute.Inputs.ServerVolumeArgs
            {
                Name = "example-kafka-volume",
                Size = 6,
                DiskType = "SSD Standard",
            },
            Nic = new Ionoscloud.Compute.Inputs.ServerNicArgs
            {
                Lan = exampleLan.Id,
                Name = "example-kafka-nic",
                Dhcp = true,
            },
        });
    
        var exampleCluster = new Ionoscloud.Kafka.Cluster("example", new()
        {
            Name = "example-kafka-cluster",
            Location = example.Location,
            Version = "3.7.0",
            Size = "S",
            Connections = new Ionoscloud.Kafka.Inputs.ClusterConnectionsArgs
            {
                DatacenterId = example.Id,
                LanId = exampleLan.Id,
                BrokerAddresses = "kafka_cluster_broker_ips_cidr_list",
            },
        });
    
        var exampleTopic = new Ionoscloud.Kafka.Topic("example", new()
        {
            ClusterId = exampleCluster.Id,
            Name = "kafka-cluster-topic",
            Location = exampleCluster.Location,
            ReplicationFactor = 1,
            NumberOfPartitions = 1,
            RetentionTime = 86400000,
            SegmentBytes = 1073741824,
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.ionoscloud.compute.Datacenter;
    import com.pulumi.ionoscloud.compute.DatacenterArgs;
    import com.pulumi.ionoscloud.compute.Lan;
    import com.pulumi.ionoscloud.compute.LanArgs;
    import com.pulumi.random.Password;
    import com.pulumi.random.PasswordArgs;
    import com.pulumi.ionoscloud.compute.Server;
    import com.pulumi.ionoscloud.compute.ServerArgs;
    import com.pulumi.ionoscloud.compute.inputs.ServerVolumeArgs;
    import com.pulumi.ionoscloud.compute.inputs.ServerNicArgs;
    import com.pulumi.ionoscloud.kafka.Cluster;
    import com.pulumi.ionoscloud.kafka.ClusterArgs;
    import com.pulumi.ionoscloud.kafka.inputs.ClusterConnectionsArgs;
    import com.pulumi.ionoscloud.kafka.Topic;
    import com.pulumi.ionoscloud.kafka.TopicArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            // Complete example
            var example = new Datacenter("example", DatacenterArgs.builder()
                .name("example-kafka-datacenter")
                .location("de/fra")
                .build());
    
            var exampleLan = new Lan("exampleLan", LanArgs.builder()
                .datacenterId(example.id())
                .public_(false)
                .name("example-kafka-lan")
                .build());
    
            var password = new Password("password", PasswordArgs.builder()
                .length(16)
                .special(false)
                .build());
    
            var exampleServer = new Server("exampleServer", ServerArgs.builder()
                .name("example-kafka-server")
                .datacenterId(example.id())
                .cores(1)
                .ram(2 * 1024)
                .availabilityZone("AUTO")
                .cpuFamily("INTEL_SKYLAKE")
                .imageName("ubuntu:latest")
                .imagePassword(password.result())
                .volume(ServerVolumeArgs.builder()
                    .name("example-kafka-volume")
                    .size(6)
                    .diskType("SSD Standard")
                    .build())
                .nic(ServerNicArgs.builder()
                    .lan(exampleLan.id())
                    .name("example-kafka-nic")
                    .dhcp(true)
                    .build())
                .build());
    
            var exampleCluster = new Cluster("exampleCluster", ClusterArgs.builder()
                .name("example-kafka-cluster")
                .location(example.location())
                .version("3.7.0")
                .size("S")
                .connections(ClusterConnectionsArgs.builder()
                    .datacenterId(example.id())
                    .lanId(exampleLan.id())
                    .brokerAddresses("kafka_cluster_broker_ips_cidr_list")
                    .build())
                .build());
    
            var exampleTopic = new Topic("exampleTopic", TopicArgs.builder()
                .clusterId(exampleCluster.id())
                .name("kafka-cluster-topic")
                .location(exampleCluster.location())
                .replicationFactor(1)
                .numberOfPartitions(1)
                .retentionTime(86400000)
                .segmentBytes(1073741824)
                .build());
    
        }
    }
    
    Coming soon!
    

    Create Topic Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Topic(name: string, args: TopicArgs, opts?: CustomResourceOptions);
    @overload
    def Topic(resource_name: str,
              args: TopicArgs,
              opts: Optional[ResourceOptions] = None)
    
    @overload
    def Topic(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              cluster_id: Optional[str] = None,
              location: Optional[str] = None,
              name: Optional[str] = None,
              number_of_partitions: Optional[int] = None,
              replication_factor: Optional[int] = None,
              retention_time: Optional[int] = None,
              segment_bytes: Optional[int] = None)
    func NewTopic(ctx *Context, name string, args TopicArgs, opts ...ResourceOption) (*Topic, error)
    public Topic(string name, TopicArgs args, CustomResourceOptions? opts = null)
    public Topic(String name, TopicArgs args)
    public Topic(String name, TopicArgs args, CustomResourceOptions options)
    
    type: ionoscloud:kafka:Topic
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args TopicArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args TopicArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args TopicArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args TopicArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args TopicArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var topicResource = new Ionoscloud.Kafka.Topic("topicResource", new()
    {
        ClusterId = "string",
        Location = "string",
        Name = "string",
        NumberOfPartitions = 0,
        ReplicationFactor = 0,
        RetentionTime = 0,
        SegmentBytes = 0,
    });
    
    example, err := kafka.NewTopic(ctx, "topicResource", &kafka.TopicArgs{
    	ClusterId:          pulumi.String("string"),
    	Location:           pulumi.String("string"),
    	Name:               pulumi.String("string"),
    	NumberOfPartitions: pulumi.Int(0),
    	ReplicationFactor:  pulumi.Int(0),
    	RetentionTime:      pulumi.Int(0),
    	SegmentBytes:       pulumi.Int(0),
    })
    
    var topicResource = new Topic("topicResource", TopicArgs.builder()
        .clusterId("string")
        .location("string")
        .name("string")
        .numberOfPartitions(0)
        .replicationFactor(0)
        .retentionTime(0)
        .segmentBytes(0)
        .build());
    
    topic_resource = ionoscloud.kafka.Topic("topicResource",
        cluster_id="string",
        location="string",
        name="string",
        number_of_partitions=0,
        replication_factor=0,
        retention_time=0,
        segment_bytes=0)
    
    const topicResource = new ionoscloud.kafka.Topic("topicResource", {
        clusterId: "string",
        location: "string",
        name: "string",
        numberOfPartitions: 0,
        replicationFactor: 0,
        retentionTime: 0,
        segmentBytes: 0,
    });
    
    type: ionoscloud:kafka:Topic
    properties:
        clusterId: string
        location: string
        name: string
        numberOfPartitions: 0
        replicationFactor: 0
        retentionTime: 0
        segmentBytes: 0
    

    Topic Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

    The Topic resource accepts the following input properties:

    ClusterId string
    [string] ID of the Kafka Cluster that the topic belongs to.
    Location string
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    Name string
    [string] The name of the Kafka Cluster Topic.
    NumberOfPartitions int
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    ReplicationFactor int
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    RetentionTime int
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    SegmentBytes int
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.
    ClusterId string
    [string] ID of the Kafka Cluster that the topic belongs to.
    Location string
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    Name string
    [string] The name of the Kafka Cluster Topic.
    NumberOfPartitions int
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    ReplicationFactor int
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    RetentionTime int
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    SegmentBytes int
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.
    clusterId String
    [string] ID of the Kafka Cluster that the topic belongs to.
    location String
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    name String
    [string] The name of the Kafka Cluster Topic.
    numberOfPartitions Integer
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    replicationFactor Integer
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    retentionTime Integer
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    segmentBytes Integer
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.
    clusterId string
    [string] ID of the Kafka Cluster that the topic belongs to.
    location string
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    name string
    [string] Name of the Kafka Cluster Topic.
    numberOfPartitions number
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    replicationFactor number
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    retentionTime number
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    segmentBytes number
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.
    cluster_id str
    [string] ID of the Kafka Cluster that the topic belongs to.
    location str
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    name str
    [string] Name of the Kafka Cluster Topic.
    number_of_partitions int
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    replication_factor int
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    retention_time int
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    segment_bytes int
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.
    clusterId String
    [string] ID of the Kafka Cluster that the topic belongs to.
    location String
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    name String
    [string] Name of the Kafka Cluster Topic.
    numberOfPartitions Number
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    replicationFactor Number
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    retentionTime Number
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    segmentBytes Number
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Topic resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing Topic Resource

    Get an existing Topic resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: TopicState, opts?: CustomResourceOptions): Topic
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            cluster_id: Optional[str] = None,
            location: Optional[str] = None,
            name: Optional[str] = None,
            number_of_partitions: Optional[int] = None,
            replication_factor: Optional[int] = None,
            retention_time: Optional[int] = None,
            segment_bytes: Optional[int] = None) -> Topic
    func GetTopic(ctx *Context, name string, id IDInput, state *TopicState, opts ...ResourceOption) (*Topic, error)
    public static Topic Get(string name, Input<string> id, TopicState? state, CustomResourceOptions? opts = null)
    public static Topic get(String name, Output<String> id, TopicState state, CustomResourceOptions options)
    resources:  _:    type: ionoscloud:kafka:Topic    get:      id: ${id}
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    ClusterId string
    [string] ID of the Kafka Cluster that the topic belongs to.
    Location string
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    Name string
    [string] Name of the Kafka Cluster Topic.
    NumberOfPartitions int
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    ReplicationFactor int
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    RetentionTime int
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    SegmentBytes int
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.
    ClusterId string
    [string] ID of the Kafka Cluster that the topic belongs to.
    Location string
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    Name string
    [string] Name of the Kafka Cluster Topic.
    NumberOfPartitions int
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    ReplicationFactor int
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    RetentionTime int
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    SegmentBytes int
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.
    clusterId String
    [string] ID of the Kafka Cluster that the topic belongs to.
    location String
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    name String
    [string] Name of the Kafka Cluster Topic.
    numberOfPartitions Integer
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    replicationFactor Integer
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    retentionTime Integer
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    segmentBytes Integer
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.
    clusterId string
    [string] ID of the Kafka Cluster that the topic belongs to.
    location string
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    name string
    [string] Name of the Kafka Cluster Topic.
    numberOfPartitions number
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    replicationFactor number
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    retentionTime number
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    segmentBytes number
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.
    cluster_id str
    [string] ID of the Kafka Cluster that the topic belongs to.
    location str
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    name str
    [string] Name of the Kafka Cluster Topic.
    number_of_partitions int
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    replication_factor int
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    retention_time int
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    segment_bytes int
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.
    clusterId String
    [string] ID of the Kafka Cluster that the topic belongs to.
    location String
    [string] The location of the Kafka Cluster Topic. Possible values: de/fra, de/txl. If this is not set and if no value is provided for the IONOS_API_URL env var, the default location will be: de/fra.
    name String
    [string] Name of the Kafka Cluster Topic.
    numberOfPartitions Number
    [int] The number of partitions of the topic. Partitions allow for parallel processing of messages. The partition count must be greater than or equal to the replication factor. Minimum value: 1. Default value: 3.
    replicationFactor Number
    [int] The number of replicas of the topic. The replication factor determines how many copies of the topic are stored on different brokers. The replication factor must be less than or equal to the number of brokers in the Kafka Cluster. Minimum value: 1. Default value: 3.
    retentionTime Number
    [int] This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied. Default value: 604800000.
    segmentBytes Number
    [int] This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time so a larger segment size means fewer files but less granular control over retention. Default value: 1073741824.

    Import

    Kafka Cluster Topic can be imported using the location, the Kafka Cluster ID, and the Kafka Cluster Topic ID, joined by colons:

    $ pulumi import ionoscloud:kafka/topic:Topic my_topic {location}:{kafka cluster uuid}:{kafka cluster topic uuid}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    ionoscloud ionos-cloud/pulumi-ionoscloud
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the ionoscloud Terraform Provider.
    ionoscloud logo
    IonosCloud v0.2.3 published on Tuesday, May 13, 2025 by ionos-cloud